Merge "Clean up dead code in controllerconfig"
This commit is contained in:
commit
02136face1
14
.zuul.yaml
14
.zuul.yaml
|
@ -13,7 +13,6 @@
|
||||||
- sysinv-tox-pylint
|
- sysinv-tox-pylint
|
||||||
- sysinv-tox-bandit
|
- sysinv-tox-bandit
|
||||||
- controllerconfig-tox-flake8
|
- controllerconfig-tox-flake8
|
||||||
- controllerconfig-tox-py27
|
|
||||||
- controllerconfig-tox-pylint
|
- controllerconfig-tox-pylint
|
||||||
- cgtsclient-tox-py27
|
- cgtsclient-tox-py27
|
||||||
- cgtsclient-tox-py36
|
- cgtsclient-tox-py36
|
||||||
|
@ -28,7 +27,6 @@
|
||||||
- sysinv-tox-pylint
|
- sysinv-tox-pylint
|
||||||
- sysinv-tox-bandit
|
- sysinv-tox-bandit
|
||||||
- controllerconfig-tox-flake8
|
- controllerconfig-tox-flake8
|
||||||
- controllerconfig-tox-py27
|
|
||||||
- controllerconfig-tox-pylint
|
- controllerconfig-tox-pylint
|
||||||
- cgtsclient-tox-py27
|
- cgtsclient-tox-py27
|
||||||
- cgtsclient-tox-py36
|
- cgtsclient-tox-py36
|
||||||
|
@ -114,18 +112,6 @@
|
||||||
tox_envlist: flake8
|
tox_envlist: flake8
|
||||||
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
|
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
|
||||||
|
|
||||||
- job:
|
|
||||||
name: controllerconfig-tox-py27
|
|
||||||
parent: tox
|
|
||||||
description: Run py27 tests for controllerconfig
|
|
||||||
required-projects:
|
|
||||||
- starlingx/fault
|
|
||||||
files:
|
|
||||||
- controllerconfig/*
|
|
||||||
vars:
|
|
||||||
tox_envlist: py27
|
|
||||||
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
|
|
||||||
|
|
||||||
- job:
|
- job:
|
||||||
name: controllerconfig-tox-pylint
|
name: controllerconfig-tox-pylint
|
||||||
parent: tox
|
parent: tox
|
||||||
|
|
|
@ -1,2 +1,2 @@
|
||||||
SRC_DIR="controllerconfig"
|
SRC_DIR="controllerconfig"
|
||||||
TIS_PATCH_VER=151
|
TIS_PATCH_VER=152
|
||||||
|
|
|
@ -57,10 +57,7 @@ mkdir -p $RPM_BUILD_ROOT/wheels
|
||||||
install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
|
install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
|
||||||
|
|
||||||
install -d -m 755 %{buildroot}%{local_bindir}
|
install -d -m 755 %{buildroot}%{local_bindir}
|
||||||
install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
|
|
||||||
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
|
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
|
||||||
install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
|
|
||||||
install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh
|
|
||||||
|
|
||||||
install -d -m 755 %{buildroot}%{local_goenabledd}
|
install -d -m 755 %{buildroot}%{local_goenabledd}
|
||||||
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
|
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
|
||||||
|
@ -74,13 +71,12 @@ install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
|
||||||
|
|
||||||
install -d -m 755 %{buildroot}%{local_etc_systemd}
|
install -d -m 755 %{buildroot}%{local_etc_systemd}
|
||||||
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
|
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
|
||||||
#install -p -D -m 664 scripts/config.service %{buildroot}%{local_etc_systemd}/config.service
|
|
||||||
|
|
||||||
%post
|
%post
|
||||||
systemctl enable controllerconfig.service
|
systemctl enable controllerconfig.service
|
||||||
|
|
||||||
%clean
|
%clean
|
||||||
rm -rf $RPM_BUILD_ROOT
|
rm -rf $RPM_BUILD_ROOT
|
||||||
|
|
||||||
%files
|
%files
|
||||||
%defattr(-,root,root,-)
|
%defattr(-,root,root,-)
|
||||||
|
|
|
@ -1,34 +1,10 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2015-2019 Wind River Systems, Inc.
|
# Copyright (c) 2015-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
|
||||||
from controllerconfig.common.validator import validate # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import Network # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import DEFAULT_CONFIG # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import REGION_CONFIG # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import DEFAULT_NAMES # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import HP_NAMES # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import SUBCLOUD_CONFIG # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import MGMT_TYPE # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import INFRA_TYPE # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import OAM_TYPE # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import NETWORK_PREFIX_NAMES # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import HOST_XML_ATTRIBUTES # noqa: F401
|
|
||||||
from controllerconfig.common.configobjects import DEFAULT_DOMAIN_NAME # noqa: F401
|
|
||||||
from controllerconfig.common.exceptions import ConfigError # noqa: F401
|
from controllerconfig.common.exceptions import ConfigError # noqa: F401
|
||||||
from controllerconfig.common.exceptions import ConfigFail # noqa: F401
|
|
||||||
from controllerconfig.common.exceptions import ValidateFail # noqa: F401
|
from controllerconfig.common.exceptions import ValidateFail # noqa: F401
|
||||||
from controllerconfig.utils import is_valid_vlan # noqa: F401
|
|
||||||
from controllerconfig.utils import is_mtu_valid # noqa: F401
|
|
||||||
from controllerconfig.utils import validate_network_str # noqa: F401
|
from controllerconfig.utils import validate_network_str # noqa: F401
|
||||||
from controllerconfig.utils import validate_address_str # noqa: F401
|
from controllerconfig.utils import validate_address_str # noqa: F401
|
||||||
from controllerconfig.utils import validate_address # noqa: F401
|
|
||||||
from controllerconfig.utils import is_valid_url # noqa: F401
|
|
||||||
from controllerconfig.utils import is_valid_domain_or_ip # noqa: F401
|
|
||||||
from controllerconfig.utils import ip_version_to_string # noqa: F401
|
|
||||||
from controllerconfig.utils import lag_mode_to_str # noqa: F401
|
|
||||||
from controllerconfig.utils import validate_openstack_password # noqa: F401
|
|
||||||
from controllerconfig.utils import validate_nameserver_address_str # noqa: F401
|
|
||||||
from controllerconfig.utils import extract_openstack_password_rules_from_file # noqa: F401
|
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,712 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
Clone a Configured System and Install the image on another
|
|
||||||
identical hardware or the same hardware.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import glob
|
|
||||||
import time
|
|
||||||
import shutil
|
|
||||||
import netaddr
|
|
||||||
import tempfile
|
|
||||||
import fileinput
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from controllerconfig.common import constants
|
|
||||||
from sysinv.common import constants as si_const
|
|
||||||
from controllerconfig import sysinv_api
|
|
||||||
import tsconfig.tsconfig as tsconfig
|
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common.exceptions import CloneFail
|
|
||||||
from controllerconfig.common.exceptions import BackupFail
|
|
||||||
from controllerconfig import utils
|
|
||||||
from controllerconfig import backup_restore
|
|
||||||
|
|
||||||
DEBUG = False
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
DEVNULL = open(os.devnull, 'w')
|
|
||||||
CLONE_ARCHIVE_DIR = "clone-archive"
|
|
||||||
CLONE_ISO_INI = ".cloneiso.ini"
|
|
||||||
NAME = "name"
|
|
||||||
INSTALLED = "installed_at"
|
|
||||||
RESULT = "result"
|
|
||||||
IN_PROGRESS = "in-progress"
|
|
||||||
FAIL = "failed"
|
|
||||||
OK = "ok"
|
|
||||||
|
|
||||||
|
|
||||||
def clone_status():
|
|
||||||
""" Check status of last install-clone. """
|
|
||||||
INI_FILE1 = os.path.join("/", CLONE_ARCHIVE_DIR, CLONE_ISO_INI)
|
|
||||||
INI_FILE2 = os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI)
|
|
||||||
name = "unknown"
|
|
||||||
result = "unknown"
|
|
||||||
installed_at = "unknown time"
|
|
||||||
for ini_file in [INI_FILE1, INI_FILE2]:
|
|
||||||
if os.path.exists(ini_file):
|
|
||||||
with open(ini_file) as f:
|
|
||||||
s = f.read()
|
|
||||||
for line in s.split("\n"):
|
|
||||||
if line.startswith(NAME):
|
|
||||||
name = line.split("=")[1].strip()
|
|
||||||
elif line.startswith(RESULT):
|
|
||||||
result = line.split("=")[1].strip()
|
|
||||||
elif line.startswith(INSTALLED):
|
|
||||||
installed_at = line.split("=")[1].strip()
|
|
||||||
break # one file was found, skip the other file
|
|
||||||
if result != "unknown":
|
|
||||||
if result == OK:
|
|
||||||
print("\nInstallation of cloned image [{}] was successful at {}\n"
|
|
||||||
.format(name, installed_at))
|
|
||||||
elif result == FAIL:
|
|
||||||
print("\nInstallation of cloned image [{}] failed at {}\n"
|
|
||||||
.format(name, installed_at))
|
|
||||||
else:
|
|
||||||
print("\ninstall-clone is in progress.\n")
|
|
||||||
else:
|
|
||||||
print("\nCloned image is not installed on this node.\n")
|
|
||||||
|
|
||||||
|
|
||||||
def check_size(archive_dir):
|
|
||||||
""" Check if there is enough space to create iso. """
|
|
||||||
overhead_bytes = 1024 ** 3 # extra GB for staging directory
|
|
||||||
# Size of the cloned iso is directly proportional to the
|
|
||||||
# installed package repository (note that patches are a part of
|
|
||||||
# the system archive size below).
|
|
||||||
# 1G overhead size added (above) will accomodate the temporary
|
|
||||||
# workspace (updating system archive etc) needed to create the iso.
|
|
||||||
feed_dir = os.path.join('/www', 'pages', 'feed',
|
|
||||||
'rel-' + tsconfig.SW_VERSION)
|
|
||||||
overhead_bytes += backup_restore.backup_std_dir_size(feed_dir)
|
|
||||||
|
|
||||||
clone_size = (
|
|
||||||
overhead_bytes +
|
|
||||||
backup_restore.backup_etc_size() +
|
|
||||||
backup_restore.backup_config_size(tsconfig.CONFIG_PATH) +
|
|
||||||
backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) +
|
|
||||||
backup_restore.backup_keyring_size(backup_restore.keyring_permdir) +
|
|
||||||
backup_restore.backup_ldap_size() +
|
|
||||||
backup_restore.backup_postgres_size() +
|
|
||||||
backup_restore.backup_std_dir_size(backup_restore.home_permdir) +
|
|
||||||
backup_restore.backup_std_dir_size(backup_restore.patching_permdir) +
|
|
||||||
backup_restore.backup_std_dir_size(
|
|
||||||
backup_restore.patching_repo_permdir) +
|
|
||||||
backup_restore.backup_std_dir_size(backup_restore.extension_permdir) +
|
|
||||||
backup_restore.backup_std_dir_size(
|
|
||||||
backup_restore.patch_vault_permdir) +
|
|
||||||
backup_restore.backup_armada_manifest_size(
|
|
||||||
constants.ARMADA_PERMDIR) +
|
|
||||||
backup_restore.backup_std_dir_size(
|
|
||||||
constants.HELM_CHARTS_PERMDIR) +
|
|
||||||
backup_restore.backup_mariadb_size())
|
|
||||||
|
|
||||||
archive_dir_free_space = \
|
|
||||||
utils.filesystem_get_free_space(archive_dir)
|
|
||||||
|
|
||||||
if clone_size > archive_dir_free_space:
|
|
||||||
print("\nArchive directory (%s) does not have enough free "
|
|
||||||
"space (%s), estimated size to create image is %s." %
|
|
||||||
(archive_dir,
|
|
||||||
utils.print_bytes(archive_dir_free_space),
|
|
||||||
utils.print_bytes(clone_size)))
|
|
||||||
raise CloneFail("Not enough free space.\n")
|
|
||||||
|
|
||||||
|
|
||||||
def update_bootloader_default(bl_file, host):
|
|
||||||
""" Update bootloader files for cloned image """
|
|
||||||
if not os.path.exists(bl_file):
|
|
||||||
LOG.error("{} does not exist".format(bl_file))
|
|
||||||
raise CloneFail("{} does not exist".format(os.path.basename(bl_file)))
|
|
||||||
|
|
||||||
# Tags should be in sync with common-bsp/files/centos.syslinux.cfg
|
|
||||||
# and common-bsp/files/grub.cfg
|
|
||||||
STANDARD_STANDARD = '0'
|
|
||||||
STANDARD_EXTENDED = 'S0'
|
|
||||||
AIO_STANDARD = '2'
|
|
||||||
AIO_EXTENDED = 'S2'
|
|
||||||
AIO_LL_STANDARD = '4'
|
|
||||||
AIO_LL_EXTENDED = 'S4'
|
|
||||||
if "grub.cfg" in bl_file:
|
|
||||||
STANDARD_STANDARD = 'standard>serial>' + \
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_STANDARD
|
|
||||||
STANDARD_EXTENDED = 'standard>serial>' + \
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
|
|
||||||
AIO_STANDARD = 'aio>serial>' + \
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_STANDARD
|
|
||||||
AIO_EXTENDED = 'aio>serial>' + \
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
|
|
||||||
AIO_LL_STANDARD = 'aio-lowlat>serial>' + \
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_STANDARD
|
|
||||||
AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
|
|
||||||
SUBMENUITEM_TBOOT = 'tboot'
|
|
||||||
SUBMENUITEM_SECUREBOOT = 'secureboot'
|
|
||||||
|
|
||||||
timeout_line = None
|
|
||||||
default_line = None
|
|
||||||
default_label_num = STANDARD_STANDARD
|
|
||||||
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
|
|
||||||
if si_const.LOWLATENCY in tsconfig.subfunctions:
|
|
||||||
default_label_num = AIO_LL_STANDARD
|
|
||||||
else:
|
|
||||||
default_label_num = AIO_STANDARD
|
|
||||||
if (tsconfig.security_profile ==
|
|
||||||
si_const.SYSTEM_SECURITY_PROFILE_EXTENDED):
|
|
||||||
default_label_num = STANDARD_EXTENDED
|
|
||||||
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
|
|
||||||
if si_const.LOWLATENCY in tsconfig.subfunctions:
|
|
||||||
default_label_num = AIO_LL_EXTENDED
|
|
||||||
else:
|
|
||||||
default_label_num = AIO_EXTENDED
|
|
||||||
if "grub.cfg" in bl_file:
|
|
||||||
if host.tboot is not None:
|
|
||||||
if host.tboot == "true":
|
|
||||||
default_label_num = default_label_num + '>' + \
|
|
||||||
SUBMENUITEM_TBOOT
|
|
||||||
else:
|
|
||||||
default_label_num = default_label_num + '>' + \
|
|
||||||
SUBMENUITEM_SECUREBOOT
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open(bl_file) as f:
|
|
||||||
s = f.read()
|
|
||||||
for line in s.split("\n"):
|
|
||||||
if line.startswith("timeout"):
|
|
||||||
timeout_line = line
|
|
||||||
elif line.startswith("default"):
|
|
||||||
default_line = line
|
|
||||||
|
|
||||||
if "grub.cfg" in bl_file:
|
|
||||||
replace = "default='{}'\ntimeout=10".format(default_label_num)
|
|
||||||
else: # isolinux format
|
|
||||||
replace = "default {}\ntimeout 10".format(default_label_num)
|
|
||||||
|
|
||||||
if default_line and timeout_line:
|
|
||||||
s = s.replace(default_line, "")
|
|
||||||
s = s.replace(timeout_line, replace)
|
|
||||||
elif default_line:
|
|
||||||
s = s.replace(default_line, replace)
|
|
||||||
elif timeout_line:
|
|
||||||
s = s.replace(timeout_line, replace)
|
|
||||||
else:
|
|
||||||
s = replace + s
|
|
||||||
|
|
||||||
s = re.sub(r'boot_device=[^\s]*',
|
|
||||||
'boot_device=%s' % host.boot_device,
|
|
||||||
s)
|
|
||||||
s = re.sub(r'rootfs_device=[^\s]*',
|
|
||||||
'rootfs_device=%s' % host.rootfs_device,
|
|
||||||
s)
|
|
||||||
s = re.sub(r'console=[^\s]*',
|
|
||||||
'console=%s' % host.console,
|
|
||||||
s)
|
|
||||||
|
|
||||||
with open(bl_file, "w") as f:
|
|
||||||
LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]"
|
|
||||||
.format(bl_file, default_label_num, timeout_line,
|
|
||||||
default_line, replace.replace('\n', '<newline>')))
|
|
||||||
f.write(s)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
LOG.error("update_bootloader_default failed: {}".format(e))
|
|
||||||
raise CloneFail("Failed to update bootloader files")
|
|
||||||
|
|
||||||
|
|
||||||
def get_online_cpus():
|
|
||||||
""" Get max cpu id """
|
|
||||||
with open('/sys/devices/system/cpu/online') as f:
|
|
||||||
s = f.read()
|
|
||||||
max_cpu_id = s.split('-')[-1].strip()
|
|
||||||
LOG.info("Max cpu id:{} [{}]".format(max_cpu_id, s.strip()))
|
|
||||||
return max_cpu_id
|
|
||||||
return ""
|
|
||||||
|
|
||||||
|
|
||||||
def get_total_mem():
|
|
||||||
""" Get total memory size """
|
|
||||||
with open('/proc/meminfo') as f:
|
|
||||||
s = f.read()
|
|
||||||
for line in s.split("\n"):
|
|
||||||
if line.startswith("MemTotal:"):
|
|
||||||
mem_total = line.split()[1]
|
|
||||||
LOG.info("MemTotal:[{}]".format(mem_total))
|
|
||||||
return mem_total
|
|
||||||
return ""
|
|
||||||
|
|
||||||
|
|
||||||
def get_disk_size(disk):
|
|
||||||
""" Get the disk size """
|
|
||||||
disk_size = ""
|
|
||||||
try:
|
|
||||||
disk_size = subprocess.check_output(
|
|
||||||
['lsblk', '--nodeps', '--output', 'SIZE',
|
|
||||||
'--noheadings', '--bytes', disk])
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(e)
|
|
||||||
LOG.error("Failed to get disk size [{}]".format(disk))
|
|
||||||
raise CloneFail("Failed to get disk size")
|
|
||||||
return disk_size.strip()
|
|
||||||
|
|
||||||
|
|
||||||
def create_ini_file(clone_archive_dir, iso_name):
|
|
||||||
"""Create clone ini file."""
|
|
||||||
interfaces = ""
|
|
||||||
my_hostname = utils.get_controller_hostname()
|
|
||||||
macs = sysinv_api.get_mac_addresses(my_hostname)
|
|
||||||
for intf in macs.keys():
|
|
||||||
interfaces += intf + " "
|
|
||||||
|
|
||||||
disk_paths = ""
|
|
||||||
for _, _, files in os.walk('/dev/disk/by-path'):
|
|
||||||
for f in files:
|
|
||||||
if f.startswith("pci-") and "part" not in f and "usb" not in f:
|
|
||||||
disk_size = get_disk_size('/dev/disk/by-path/' + f)
|
|
||||||
disk_paths += f + "#" + disk_size + " "
|
|
||||||
break # no need to go into sub-dirs.
|
|
||||||
|
|
||||||
LOG.info("create ini: {} {}".format(macs, files))
|
|
||||||
with open(os.path.join(clone_archive_dir, CLONE_ISO_INI), 'w') as f:
|
|
||||||
f.write('[clone_iso]\n')
|
|
||||||
f.write('name=' + iso_name + '\n')
|
|
||||||
f.write('host=' + my_hostname + '\n')
|
|
||||||
f.write('created_at=' + time.strftime("%Y-%m-%d %H:%M:%S %Z")
|
|
||||||
+ '\n')
|
|
||||||
f.write('interfaces=' + interfaces + '\n')
|
|
||||||
f.write('disks=' + disk_paths + '\n')
|
|
||||||
f.write('cpus=' + get_online_cpus() + '\n')
|
|
||||||
f.write('mem=' + get_total_mem() + '\n')
|
|
||||||
LOG.info("create ini: ({}) ({})".format(interfaces, disk_paths))
|
|
||||||
|
|
||||||
|
|
||||||
def create_iso(iso_name, archive_dir):
|
|
||||||
""" Create iso image. This is modelled after
|
|
||||||
the cgcs-root/build-tools/build-iso tool. """
|
|
||||||
try:
|
|
||||||
controller_0 = sysinv_api.get_host_data('controller-0')
|
|
||||||
except Exception as e:
|
|
||||||
e_log = "Failed to retrieve controller-0 inventory details."
|
|
||||||
LOG.exception(e_log)
|
|
||||||
raise CloneFail(e_log)
|
|
||||||
|
|
||||||
iso_dir = os.path.join(archive_dir, 'isolinux')
|
|
||||||
clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)
|
|
||||||
output = None
|
|
||||||
tmpdir = None
|
|
||||||
total_steps = 6
|
|
||||||
step = 1
|
|
||||||
print ("\nCreating ISO:")
|
|
||||||
|
|
||||||
# Add the correct kick-start file to the image
|
|
||||||
ks_file = "controller_ks.cfg"
|
|
||||||
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
|
|
||||||
if si_const.LOWLATENCY in tsconfig.subfunctions:
|
|
||||||
ks_file = "smallsystem_lowlatency_ks.cfg"
|
|
||||||
else:
|
|
||||||
ks_file = "smallsystem_ks.cfg"
|
|
||||||
|
|
||||||
try:
|
|
||||||
# prepare the iso files
|
|
||||||
images_dir = os.path.join(iso_dir, 'images')
|
|
||||||
os.mkdir(images_dir, 0o644)
|
|
||||||
pxe_dir = os.path.join('/pxeboot',
|
|
||||||
'rel-' + tsconfig.SW_VERSION)
|
|
||||||
os.symlink(pxe_dir + '/installer-bzImage',
|
|
||||||
iso_dir + '/vmlinuz')
|
|
||||||
os.symlink(pxe_dir + '/installer-initrd',
|
|
||||||
iso_dir + '/initrd.img')
|
|
||||||
utils.progress(total_steps, step, 'preparing files', 'DONE')
|
|
||||||
step += 1
|
|
||||||
|
|
||||||
feed_dir = os.path.join('/www', 'pages', 'feed',
|
|
||||||
'rel-' + tsconfig.SW_VERSION)
|
|
||||||
os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')
|
|
||||||
os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')
|
|
||||||
os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')
|
|
||||||
shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)
|
|
||||||
update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)
|
|
||||||
shutil.copyfile('/usr/share/syslinux/isolinux.bin',
|
|
||||||
iso_dir + '/isolinux.bin')
|
|
||||||
os.symlink('/usr/share/syslinux/vesamenu.c32',
|
|
||||||
iso_dir + '/vesamenu.c32')
|
|
||||||
for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):
|
|
||||||
shutil.copy(os.path.join(feed_dir, filename), iso_dir)
|
|
||||||
utils.progress(total_steps, step, 'preparing files', 'DONE')
|
|
||||||
step += 1
|
|
||||||
|
|
||||||
efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')
|
|
||||||
os.makedirs(efiboot_dir, 0o644)
|
|
||||||
l_efi_dir = os.path.join('/boot', 'efi', 'EFI')
|
|
||||||
shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)
|
|
||||||
shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)
|
|
||||||
shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)
|
|
||||||
shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)
|
|
||||||
update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)
|
|
||||||
shutil.copytree(l_efi_dir + '/centos/fonts',
|
|
||||||
efiboot_dir + '/fonts')
|
|
||||||
# copy EFI boot image and update the grub.cfg file
|
|
||||||
efi_img = images_dir + '/efiboot.img'
|
|
||||||
shutil.copy2(pxe_dir + '/efiboot.img', efi_img)
|
|
||||||
tmpdir = tempfile.mkdtemp(dir=archive_dir)
|
|
||||||
output = subprocess.check_output(
|
|
||||||
["mount", "-t", "vfat", "-o", "loop",
|
|
||||||
efi_img, tmpdir],
|
|
||||||
stderr=subprocess.STDOUT)
|
|
||||||
# replace the grub.cfg file with the updated file
|
|
||||||
efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')
|
|
||||||
os.remove(efi_grub_f)
|
|
||||||
shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)
|
|
||||||
subprocess.call(['umount', tmpdir])
|
|
||||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
|
||||||
tmpdir = None
|
|
||||||
|
|
||||||
epoch_time = "%.9f" % time.time()
|
|
||||||
disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"]
|
|
||||||
with open(iso_dir + '/.discinfo', 'w') as f:
|
|
||||||
f.write('\n'.join(disc_info))
|
|
||||||
|
|
||||||
# copy the latest install_clone executable
|
|
||||||
shutil.copy2('/usr/bin/install_clone', iso_dir)
|
|
||||||
subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " +
|
|
||||||
iso_dir + "/" + ks_file, shell=True)
|
|
||||||
utils.progress(total_steps, step, 'preparing files', 'DONE')
|
|
||||||
step += 1
|
|
||||||
|
|
||||||
# copy patches
|
|
||||||
iso_patches_dir = os.path.join(iso_dir, 'patches')
|
|
||||||
iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')
|
|
||||||
iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')
|
|
||||||
iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')
|
|
||||||
iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied')
|
|
||||||
iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,
|
|
||||||
'committed')
|
|
||||||
|
|
||||||
os.mkdir(iso_patches_dir, 0o755)
|
|
||||||
os.mkdir(iso_patch_repo_dir, 0o755)
|
|
||||||
os.mkdir(iso_patch_pkgs_dir, 0o755)
|
|
||||||
os.mkdir(iso_patch_metadata_dir, 0o755)
|
|
||||||
os.mkdir(iso_patch_applied_dir, 0o755)
|
|
||||||
os.mkdir(iso_patch_committed_dir, 0o755)
|
|
||||||
|
|
||||||
repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION
|
|
||||||
pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION
|
|
||||||
patch_applied_dir = '/opt/patching/metadata/applied/'
|
|
||||||
patch_committed_dir = '/opt/patching/metadata/committed/'
|
|
||||||
subprocess.check_call(['rsync', '-a', repodata,
|
|
||||||
'%s/' % iso_patch_repo_dir])
|
|
||||||
if os.path.exists(pkgsdir):
|
|
||||||
subprocess.check_call(['rsync', '-a', pkgsdir,
|
|
||||||
'%s/' % iso_patch_pkgs_dir])
|
|
||||||
if os.path.exists(patch_applied_dir):
|
|
||||||
subprocess.check_call(['rsync', '-a', patch_applied_dir,
|
|
||||||
'%s/' % iso_patch_applied_dir])
|
|
||||||
if os.path.exists(patch_committed_dir):
|
|
||||||
subprocess.check_call(['rsync', '-a', patch_committed_dir,
|
|
||||||
'%s/' % iso_patch_committed_dir])
|
|
||||||
utils.progress(total_steps, step, 'preparing files', 'DONE')
|
|
||||||
step += 1
|
|
||||||
|
|
||||||
create_ini_file(clone_archive_dir, iso_name)
|
|
||||||
|
|
||||||
os.chmod(iso_dir + '/isolinux.bin', 0o664)
|
|
||||||
iso_file = os.path.join(archive_dir, iso_name + ".iso")
|
|
||||||
output = subprocess.check_output(
|
|
||||||
["nice", "mkisofs",
|
|
||||||
"-o", iso_file, "-R", "-D",
|
|
||||||
"-A", "oe_iso_boot", "-V", "oe_iso_boot",
|
|
||||||
"-f", "-quiet",
|
|
||||||
"-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot",
|
|
||||||
"-boot-load-size", "4", "-boot-info-table",
|
|
||||||
"-eltorito-alt-boot", "-e", "images/efiboot.img",
|
|
||||||
"-no-emul-boot",
|
|
||||||
iso_dir],
|
|
||||||
stderr=subprocess.STDOUT)
|
|
||||||
LOG.info("{} created: [{}]".format(iso_file, output))
|
|
||||||
utils.progress(total_steps, step, 'iso created', 'DONE')
|
|
||||||
step += 1
|
|
||||||
|
|
||||||
output = subprocess.check_output(
|
|
||||||
["nice", "isohybrid",
|
|
||||||
"--uefi",
|
|
||||||
iso_file],
|
|
||||||
stderr=subprocess.STDOUT)
|
|
||||||
LOG.debug("isohybrid: {}".format(output))
|
|
||||||
|
|
||||||
output = subprocess.check_output(
|
|
||||||
["nice", "implantisomd5",
|
|
||||||
iso_file],
|
|
||||||
stderr=subprocess.STDOUT)
|
|
||||||
LOG.debug("implantisomd5: {}".format(output))
|
|
||||||
utils.progress(total_steps, step, 'checksum implanted', 'DONE')
|
|
||||||
print("Cloned iso image created: {}".format(iso_file))
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(e)
|
|
||||||
e_log = "ISO creation ({}) failed".format(iso_name)
|
|
||||||
if output:
|
|
||||||
e_log += ' [' + output + ']'
|
|
||||||
LOG.error(e_log)
|
|
||||||
raise CloneFail("ISO creation failed.")
|
|
||||||
|
|
||||||
finally:
|
|
||||||
if tmpdir:
|
|
||||||
subprocess.call(['umount', tmpdir], stderr=DEVNULL)
|
|
||||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
|
||||||
|
|
||||||
|
|
||||||
def find_and_replace_in_file(target, find, replace):
|
|
||||||
""" Find and replace a string in a file. """
|
|
||||||
found = None
|
|
||||||
try:
|
|
||||||
for line in fileinput.FileInput(target, inplace=1):
|
|
||||||
if find in line:
|
|
||||||
# look for "find" string within word boundaries
|
|
||||||
fpat = r'\b' + find + r'\b'
|
|
||||||
line = re.sub(fpat, replace, line)
|
|
||||||
found = True
|
|
||||||
print(line, end='')
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
LOG.error("Failed to replace [{}] with [{}] in [{}]: {}"
|
|
||||||
.format(find, replace, target, str(e)))
|
|
||||||
found = None
|
|
||||||
finally:
|
|
||||||
fileinput.close()
|
|
||||||
return found
|
|
||||||
|
|
||||||
|
|
||||||
def find_and_replace(target_list, find, replace):
|
|
||||||
""" Find and replace a string in all files in a directory. """
|
|
||||||
found = False
|
|
||||||
file_list = []
|
|
||||||
for target in target_list:
|
|
||||||
if os.path.isfile(target):
|
|
||||||
if find_and_replace_in_file(target, find, replace):
|
|
||||||
found = True
|
|
||||||
file_list.append(target)
|
|
||||||
elif os.path.isdir(target):
|
|
||||||
try:
|
|
||||||
output = subprocess.check_output(
|
|
||||||
['grep', '-rl', find, target])
|
|
||||||
if output:
|
|
||||||
for line in output.split('\n'):
|
|
||||||
if line and find_and_replace_in_file(
|
|
||||||
line, find, replace):
|
|
||||||
found = True
|
|
||||||
file_list.append(line)
|
|
||||||
except Exception:
|
|
||||||
pass # nothing found in that directory
|
|
||||||
if not found:
|
|
||||||
LOG.error("[{}] not found in backup".format(find))
|
|
||||||
else:
|
|
||||||
LOG.info("Replaced [{}] with [{}] in {}".format(
|
|
||||||
find, replace, file_list))
|
|
||||||
|
|
||||||
|
|
||||||
def remove_from_archive(archive, unwanted):
|
|
||||||
""" Remove a file from the archive. """
|
|
||||||
try:
|
|
||||||
subprocess.check_call(["tar", "--delete",
|
|
||||||
"--file=" + archive,
|
|
||||||
unwanted])
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
LOG.error("Delete of {} failed: {}".format(unwanted, e.output))
|
|
||||||
raise CloneFail("Failed to modify backup archive")
|
|
||||||
|
|
||||||
|
|
||||||
def update_oamip_in_archive(tmpdir):
|
|
||||||
""" Update OAM IP in system archive file. """
|
|
||||||
oam_list = sysinv_api.get_oam_ip()
|
|
||||||
if not oam_list:
|
|
||||||
raise CloneFail("Failed to get OAM IP")
|
|
||||||
for oamfind in [oam_list.oam_start_ip, oam_list.oam_end_ip,
|
|
||||||
oam_list.oam_subnet, oam_list.oam_floating_ip,
|
|
||||||
oam_list.oam_c0_ip, oam_list.oam_c1_ip]:
|
|
||||||
if not oamfind:
|
|
||||||
continue
|
|
||||||
ip = netaddr.IPNetwork(oamfind)
|
|
||||||
find_str = ""
|
|
||||||
if ip.version == 4:
|
|
||||||
# if ipv4, use 192.0.x.x as the temporary oam ip
|
|
||||||
find_str = str(ip.ip)
|
|
||||||
ipstr_list = find_str.split('.')
|
|
||||||
ipstr_list[0] = '192'
|
|
||||||
ipstr_list[1] = '0'
|
|
||||||
repl_ipstr = ".".join(ipstr_list)
|
|
||||||
else:
|
|
||||||
# if ipv6, use 2001:db8:x as the temporary oam ip
|
|
||||||
find_str = str(ip.ip)
|
|
||||||
ipstr_list = find_str.split(':')
|
|
||||||
ipstr_list[0] = '2001'
|
|
||||||
ipstr_list[1] = 'db8'
|
|
||||||
repl_ipstr = ":".join(ipstr_list)
|
|
||||||
if repl_ipstr:
|
|
||||||
find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'etc/hosts'),
|
|
||||||
os.path.join(tmpdir, 'etc/sysconfig/network-scripts'),
|
|
||||||
os.path.join(tmpdir, 'etc/nfv/vim/config.ini'),
|
|
||||||
os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'),
|
|
||||||
os.path.join(tmpdir, 'etc/heat/heat.conf'),
|
|
||||||
os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'),
|
|
||||||
os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'),
|
|
||||||
os.path.join(tmpdir, 'etc/nova/nova.conf'),
|
|
||||||
os.path.join(tmpdir, 'config/hosts'),
|
|
||||||
os.path.join(tmpdir, 'hieradata'),
|
|
||||||
os.path.join(tmpdir, 'postgres/keystone.sql.data'),
|
|
||||||
os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
find_str, repl_ipstr)
|
|
||||||
else:
|
|
||||||
LOG.error("Failed to modify OAM IP:[{}]"
|
|
||||||
.format(oamfind))
|
|
||||||
raise CloneFail("Failed to modify OAM IP")
|
|
||||||
|
|
||||||
|
|
||||||
def update_mac_in_archive(tmpdir):
|
|
||||||
""" Update MAC addresses in system archive file. """
|
|
||||||
hostname = utils.get_controller_hostname()
|
|
||||||
macs = sysinv_api.get_mac_addresses(hostname)
|
|
||||||
for intf, mac in macs.items():
|
|
||||||
find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
mac, "CLONEISOMAC_{}{}".format(hostname, intf))
|
|
||||||
|
|
||||||
if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
|
|
||||||
tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
|
|
||||||
hostname = utils.get_mate_controller_hostname()
|
|
||||||
macs = sysinv_api.get_mac_addresses(hostname)
|
|
||||||
for intf, mac in macs.items():
|
|
||||||
find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
mac, "CLONEISOMAC_{}{}".format(hostname, intf))
|
|
||||||
|
|
||||||
|
|
||||||
def update_disk_serial_id_in_archive(tmpdir):
|
|
||||||
""" Update disk serial id in system archive file. """
|
|
||||||
hostname = utils.get_controller_hostname()
|
|
||||||
disk_sids = sysinv_api.get_disk_serial_ids(hostname)
|
|
||||||
for d_dnode, d_sid in disk_sids.items():
|
|
||||||
find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode))
|
|
||||||
|
|
||||||
if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
|
|
||||||
tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
|
|
||||||
hostname = utils.get_mate_controller_hostname()
|
|
||||||
disk_sids = sysinv_api.get_disk_serial_ids(hostname)
|
|
||||||
for d_dnode, d_sid in disk_sids.items():
|
|
||||||
find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
d_sid, "CLONEISODISKSID_{}{}".format(hostname, d_dnode))
|
|
||||||
|
|
||||||
|
|
||||||
def update_sysuuid_in_archive(tmpdir):
|
|
||||||
""" Update system uuid in system archive file. """
|
|
||||||
sysuuid = sysinv_api.get_system_uuid()
|
|
||||||
find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
sysuuid, "CLONEISO_SYSTEM_UUID")
|
|
||||||
|
|
||||||
|
|
||||||
def update_backup_archive(backup_name, archive_dir):
|
|
||||||
""" Update backup archive file to be included in clone-iso """
|
|
||||||
path_to_archive = os.path.join(archive_dir, backup_name)
|
|
||||||
tmpdir = tempfile.mkdtemp(dir=archive_dir)
|
|
||||||
try:
|
|
||||||
subprocess.check_call(
|
|
||||||
['gunzip', path_to_archive + '.tgz'],
|
|
||||||
stdout=DEVNULL, stderr=DEVNULL)
|
|
||||||
# 70-persistent-net.rules with the correct MACs will be
|
|
||||||
# generated on the linux boot on the cloned side. Remove
|
|
||||||
# the stale file from original side.
|
|
||||||
remove_from_archive(path_to_archive + '.tar',
|
|
||||||
'etc/udev/rules.d/70-persistent-net.rules')
|
|
||||||
# Extract only a subset of directories which have files to be
|
|
||||||
# updated for oam-ip and MAC addresses. After updating the files
|
|
||||||
# these directories are added back to the archive.
|
|
||||||
subprocess.check_call(
|
|
||||||
['tar', '-x',
|
|
||||||
'--directory=' + tmpdir,
|
|
||||||
'-f', path_to_archive + '.tar',
|
|
||||||
'etc', 'postgres', 'config',
|
|
||||||
'hieradata'],
|
|
||||||
stdout=DEVNULL, stderr=DEVNULL)
|
|
||||||
update_oamip_in_archive(tmpdir)
|
|
||||||
update_mac_in_archive(tmpdir)
|
|
||||||
update_disk_serial_id_in_archive(tmpdir)
|
|
||||||
update_sysuuid_in_archive(tmpdir)
|
|
||||||
subprocess.check_call(
|
|
||||||
['tar', '--update',
|
|
||||||
'--directory=' + tmpdir,
|
|
||||||
'-f', path_to_archive + '.tar',
|
|
||||||
'etc', 'postgres', 'config',
|
|
||||||
'hieradata'],
|
|
||||||
stdout=DEVNULL, stderr=DEVNULL)
|
|
||||||
subprocess.check_call(['gzip', path_to_archive + '.tar'])
|
|
||||||
shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
LOG.error("Update of backup archive {} failed {}".format(
|
|
||||||
path_to_archive, str(e)))
|
|
||||||
raise CloneFail("Failed to update backup archive")
|
|
||||||
|
|
||||||
finally:
|
|
||||||
if not DEBUG:
|
|
||||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
|
||||||
|
|
||||||
|
|
||||||
def validate_controller_state():
|
|
||||||
""" Cloning allowed now? """
|
|
||||||
# Check if this Controller is enabled and provisioned
|
|
||||||
try:
|
|
||||||
if not sysinv_api.controller_enabled_provisioned(
|
|
||||||
utils.get_controller_hostname()):
|
|
||||||
raise CloneFail("Controller is not enabled/provisioned")
|
|
||||||
if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
|
|
||||||
tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
|
|
||||||
if not sysinv_api.controller_enabled_provisioned(
|
|
||||||
utils.get_mate_controller_hostname()):
|
|
||||||
raise CloneFail("Mate controller is not enabled/provisioned")
|
|
||||||
except CloneFail:
|
|
||||||
raise
|
|
||||||
except Exception:
|
|
||||||
raise CloneFail("Controller is not enabled/provisioned")
|
|
||||||
|
|
||||||
if utils.get_system_type() != si_const.TIS_AIO_BUILD:
|
|
||||||
raise CloneFail("Cloning supported only on All-in-one systems")
|
|
||||||
|
|
||||||
if len(sysinv_api.get_alarms()) > 0:
|
|
||||||
raise CloneFail("There are active alarms on this system!")
|
|
||||||
|
|
||||||
|
|
||||||
def clone(backup_name, archive_dir):
|
|
||||||
""" Do Cloning """
|
|
||||||
validate_controller_state()
|
|
||||||
LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir))
|
|
||||||
check_size(archive_dir)
|
|
||||||
|
|
||||||
isolinux_dir = os.path.join(archive_dir, 'isolinux')
|
|
||||||
clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR)
|
|
||||||
if os.path.exists(isolinux_dir):
|
|
||||||
LOG.info("deleting old iso_dir %s" % isolinux_dir)
|
|
||||||
shutil.rmtree(isolinux_dir, ignore_errors=True)
|
|
||||||
os.makedirs(clone_archive_dir, 0o644)
|
|
||||||
|
|
||||||
try:
|
|
||||||
backup_restore.backup(backup_name, clone_archive_dir, clone=True)
|
|
||||||
LOG.info("system backup done")
|
|
||||||
update_backup_archive(backup_name + '_system', clone_archive_dir)
|
|
||||||
create_iso(backup_name, archive_dir)
|
|
||||||
except BackupFail as e:
|
|
||||||
raise CloneFail(e.message)
|
|
||||||
except CloneFail as e:
|
|
||||||
raise
|
|
||||||
finally:
|
|
||||||
if not DEBUG:
|
|
||||||
shutil.rmtree(isolinux_dir, ignore_errors=True)
|
|
|
@ -1,371 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright (c) 2015-2019 Wind River Systems, Inc.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from netaddr import IPRange
|
|
||||||
from controllerconfig.common.exceptions import ConfigFail
|
|
||||||
from controllerconfig.common.exceptions import ValidateFail
|
|
||||||
from controllerconfig.utils import is_mtu_valid
|
|
||||||
from controllerconfig.utils import is_valid_vlan
|
|
||||||
from controllerconfig.utils import validate_network_str
|
|
||||||
from controllerconfig.utils import validate_address_str
|
|
||||||
|
|
||||||
DEFAULT_CONFIG = 0
|
|
||||||
REGION_CONFIG = 1
|
|
||||||
SUBCLOUD_CONFIG = 2
|
|
||||||
|
|
||||||
MGMT_TYPE = 0
|
|
||||||
INFRA_TYPE = 1
|
|
||||||
OAM_TYPE = 2
|
|
||||||
CLUSTER_TYPE = 3
|
|
||||||
NETWORK_PREFIX_NAMES = [
|
|
||||||
('MGMT', 'INFRA', 'OAM', 'CLUSTER'),
|
|
||||||
('CLM', 'BLS', 'CAN', 'CLUSTER')
|
|
||||||
]
|
|
||||||
|
|
||||||
HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions',
|
|
||||||
'mgmt_mac', 'mgmt_ip',
|
|
||||||
'bm_ip', 'bm_type', 'bm_username',
|
|
||||||
'bm_password', 'boot_device', 'rootfs_device',
|
|
||||||
'install_output', 'console', 'vsc_controllers',
|
|
||||||
'power_on', 'location']
|
|
||||||
|
|
||||||
# Network naming types
|
|
||||||
DEFAULT_NAMES = 0
|
|
||||||
HP_NAMES = 1
|
|
||||||
|
|
||||||
# well-known default domain name
|
|
||||||
DEFAULT_DOMAIN_NAME = 'Default'
|
|
||||||
|
|
||||||
|
|
||||||
class LogicalInterface(object):
|
|
||||||
""" Represents configuration for a logical interface.
|
|
||||||
"""
|
|
||||||
def __init__(self):
|
|
||||||
self.name = None
|
|
||||||
self.mtu = None
|
|
||||||
self.lag_interface = False
|
|
||||||
self.lag_mode = None
|
|
||||||
self.ports = None
|
|
||||||
|
|
||||||
def parse_config(self, system_config, logical_interface):
|
|
||||||
# Ensure logical interface config is present
|
|
||||||
if not system_config.has_section(logical_interface):
|
|
||||||
raise ConfigFail("Missing config for logical interface %s." %
|
|
||||||
logical_interface)
|
|
||||||
self.name = logical_interface
|
|
||||||
|
|
||||||
# Parse/validate the MTU
|
|
||||||
self.mtu = system_config.getint(logical_interface, 'INTERFACE_MTU')
|
|
||||||
if not is_mtu_valid(self.mtu):
|
|
||||||
raise ConfigFail("Invalid MTU value for %s. "
|
|
||||||
"Valid values: 576 - 9216" % logical_interface)
|
|
||||||
|
|
||||||
# Parse the ports
|
|
||||||
self.ports = [_f for _f in
|
|
||||||
[x.strip() for x in
|
|
||||||
system_config.get(logical_interface,
|
|
||||||
'INTERFACE_PORTS').split(',')]
|
|
||||||
if _f]
|
|
||||||
|
|
||||||
# Parse/validate the LAG config
|
|
||||||
lag_interface = system_config.get(logical_interface,
|
|
||||||
'LAG_INTERFACE')
|
|
||||||
if lag_interface.lower() == 'y':
|
|
||||||
self.lag_interface = True
|
|
||||||
if len(self.ports) != 2:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid number of ports (%d) supplied for LAG "
|
|
||||||
"interface %s" % (len(self.ports), logical_interface))
|
|
||||||
self.lag_mode = system_config.getint(logical_interface, 'LAG_MODE')
|
|
||||||
if self.lag_mode < 1 or self.lag_mode > 6:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid LAG_MODE value of %d for %s. Valid values: 1-6" %
|
|
||||||
(self.lag_mode, logical_interface))
|
|
||||||
elif lag_interface.lower() == 'n':
|
|
||||||
if len(self.ports) > 1:
|
|
||||||
raise ConfigFail(
|
|
||||||
"More than one interface supplied for non-LAG "
|
|
||||||
"interface %s" % logical_interface)
|
|
||||||
if len(self.ports) == 0:
|
|
||||||
raise ConfigFail(
|
|
||||||
"No interfaces supplied for non-LAG "
|
|
||||||
"interface %s" % logical_interface)
|
|
||||||
else:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid LAG_INTERFACE value of %s for %s. Valid values: "
|
|
||||||
"Y or N" % (lag_interface, logical_interface))
|
|
||||||
|
|
||||||
|
|
||||||
class Network(object):
|
|
||||||
""" Represents configuration for a network.
|
|
||||||
"""
|
|
||||||
def __init__(self):
|
|
||||||
self.vlan = None
|
|
||||||
self.cidr = None
|
|
||||||
self.multicast_cidr = None
|
|
||||||
self.start_address = None
|
|
||||||
self.end_address = None
|
|
||||||
self.start_end_in_config = False
|
|
||||||
self.floating_address = None
|
|
||||||
self.address_0 = None
|
|
||||||
self.address_1 = None
|
|
||||||
self.dynamic_allocation = False
|
|
||||||
self.gateway_address = None
|
|
||||||
self.logical_interface = None
|
|
||||||
|
|
||||||
def parse_config(self, system_config, config_type, network_type,
|
|
||||||
min_addresses=0, multicast_addresses=0, optional=False,
|
|
||||||
naming_type=DEFAULT_NAMES,
|
|
||||||
logical_interface_required=True):
|
|
||||||
network_prefix = NETWORK_PREFIX_NAMES[naming_type][network_type]
|
|
||||||
network_name = network_prefix + '_NETWORK'
|
|
||||||
|
|
||||||
if naming_type == HP_NAMES:
|
|
||||||
attr_prefix = network_prefix + '_'
|
|
||||||
else:
|
|
||||||
attr_prefix = ''
|
|
||||||
|
|
||||||
# Ensure network config is present
|
|
||||||
if not system_config.has_section(network_name):
|
|
||||||
if not optional:
|
|
||||||
raise ConfigFail("Missing config for network %s." %
|
|
||||||
network_name)
|
|
||||||
else:
|
|
||||||
# Optional interface - just return
|
|
||||||
return
|
|
||||||
|
|
||||||
# Parse/validate the VLAN
|
|
||||||
if system_config.has_option(network_name, attr_prefix + 'VLAN'):
|
|
||||||
self.vlan = system_config.getint(network_name,
|
|
||||||
attr_prefix + 'VLAN')
|
|
||||||
if self.vlan:
|
|
||||||
if not is_valid_vlan(self.vlan):
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %d for %s. Valid values: 1-4094" %
|
|
||||||
(attr_prefix + 'VLAN', self.vlan, network_name))
|
|
||||||
|
|
||||||
# Parse/validate the cidr
|
|
||||||
cidr_str = system_config.get(network_name, attr_prefix + 'CIDR')
|
|
||||||
try:
|
|
||||||
self.cidr = validate_network_str(
|
|
||||||
cidr_str, min_addresses)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'CIDR', cidr_str, network_name, e))
|
|
||||||
|
|
||||||
# Parse/validate the multicast subnet
|
|
||||||
if 0 < multicast_addresses and \
|
|
||||||
system_config.has_option(network_name,
|
|
||||||
attr_prefix + 'MULTICAST_CIDR'):
|
|
||||||
multicast_cidr_str = system_config.get(network_name, attr_prefix +
|
|
||||||
'MULTICAST_CIDR')
|
|
||||||
try:
|
|
||||||
self.multicast_cidr = validate_network_str(
|
|
||||||
multicast_cidr_str, multicast_addresses, multicast=True)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str,
|
|
||||||
network_name, e))
|
|
||||||
|
|
||||||
if self.cidr.version != self.multicast_cidr.version:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s. Multicast "
|
|
||||||
"subnet and network IP families must be the same." %
|
|
||||||
(attr_prefix + 'MULTICAST_CIDR', multicast_cidr_str,
|
|
||||||
network_name))
|
|
||||||
|
|
||||||
# Parse/validate the hardwired controller addresses
|
|
||||||
floating_address_str = None
|
|
||||||
address_0_str = None
|
|
||||||
address_1_str = None
|
|
||||||
|
|
||||||
if min_addresses == 1:
|
|
||||||
if (system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_FLOATING_ADDRESS') or
|
|
||||||
system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_UNIT_0_ADDRESS') or
|
|
||||||
system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_UNIT_1_ADDRESS') or
|
|
||||||
system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_START_ADDRESS') or
|
|
||||||
system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_END_ADDRESS')):
|
|
||||||
raise ConfigFail(
|
|
||||||
"Only one IP address is required for OAM "
|
|
||||||
"network, use 'IP_ADDRESS' to specify the OAM IP "
|
|
||||||
"address")
|
|
||||||
floating_address_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'IP_ADDRESS')
|
|
||||||
try:
|
|
||||||
self.floating_address = validate_address_str(
|
|
||||||
floating_address_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'IP_ADDRESS',
|
|
||||||
floating_address_str, network_name, e))
|
|
||||||
self.address_0 = self.floating_address
|
|
||||||
self.address_1 = self.floating_address
|
|
||||||
else:
|
|
||||||
if system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_FLOATING_ADDRESS'):
|
|
||||||
floating_address_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'IP_FLOATING_ADDRESS')
|
|
||||||
try:
|
|
||||||
self.floating_address = validate_address_str(
|
|
||||||
floating_address_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'IP_FLOATING_ADDRESS',
|
|
||||||
floating_address_str, network_name, e))
|
|
||||||
|
|
||||||
if system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_UNIT_0_ADDRESS'):
|
|
||||||
address_0_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'IP_UNIT_0_ADDRESS')
|
|
||||||
try:
|
|
||||||
self.address_0 = validate_address_str(
|
|
||||||
address_0_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'IP_UNIT_0_ADDRESS',
|
|
||||||
address_0_str, network_name, e))
|
|
||||||
|
|
||||||
if system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_UNIT_1_ADDRESS'):
|
|
||||||
address_1_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'IP_UNIT_1_ADDRESS')
|
|
||||||
try:
|
|
||||||
self.address_1 = validate_address_str(
|
|
||||||
address_1_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'IP_UNIT_1_ADDRESS',
|
|
||||||
address_1_str, network_name, e))
|
|
||||||
|
|
||||||
# Parse/validate the start/end addresses
|
|
||||||
start_address_str = None
|
|
||||||
end_address_str = None
|
|
||||||
if system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_START_ADDRESS'):
|
|
||||||
start_address_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'IP_START_ADDRESS')
|
|
||||||
try:
|
|
||||||
self.start_address = validate_address_str(
|
|
||||||
start_address_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'IP_START_ADDRESS',
|
|
||||||
start_address_str, network_name, e))
|
|
||||||
|
|
||||||
if system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'IP_END_ADDRESS'):
|
|
||||||
end_address_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'IP_END_ADDRESS')
|
|
||||||
try:
|
|
||||||
self.end_address = validate_address_str(
|
|
||||||
end_address_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s " %
|
|
||||||
(attr_prefix + 'IP_END_ADDRESS',
|
|
||||||
end_address_str, network_name, e))
|
|
||||||
|
|
||||||
if start_address_str or end_address_str:
|
|
||||||
if not end_address_str:
|
|
||||||
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
|
|
||||||
(attr_prefix + 'IP_END_ADDRESS',
|
|
||||||
network_name))
|
|
||||||
if not start_address_str:
|
|
||||||
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
|
|
||||||
(attr_prefix + 'IP_START_ADDRESS',
|
|
||||||
network_name))
|
|
||||||
if not self.start_address < self.end_address:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Start address %s not less than end address %s for %s."
|
|
||||||
% (str(self.start_address), str(self.end_address),
|
|
||||||
network_name))
|
|
||||||
if not IPRange(start_address_str, end_address_str).size >= \
|
|
||||||
min_addresses:
|
|
||||||
raise ConfigFail("Address range for %s must contain at "
|
|
||||||
"least %d addresses." %
|
|
||||||
(network_name, min_addresses))
|
|
||||||
self.start_end_in_config = True
|
|
||||||
|
|
||||||
if floating_address_str or address_0_str or address_1_str:
|
|
||||||
if not floating_address_str:
|
|
||||||
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
|
|
||||||
(attr_prefix + 'IP_FLOATING_ADDRESS',
|
|
||||||
network_name))
|
|
||||||
if not address_0_str:
|
|
||||||
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
|
|
||||||
(attr_prefix + 'IP_UNIT_0_ADDRESS',
|
|
||||||
network_name))
|
|
||||||
if not address_1_str:
|
|
||||||
raise ConfigFail("Missing attribute %s for %s_NETWORK" %
|
|
||||||
(attr_prefix + 'IP_UNIT_1_ADDRESS',
|
|
||||||
network_name))
|
|
||||||
|
|
||||||
if start_address_str and floating_address_str:
|
|
||||||
raise ConfigFail("Overspecified network: Can only set %s "
|
|
||||||
"and %s OR %s, %s, and %s for "
|
|
||||||
"%s_NETWORK" %
|
|
||||||
(attr_prefix + 'IP_START_ADDRESS',
|
|
||||||
attr_prefix + 'IP_END_ADDRESS',
|
|
||||||
attr_prefix + 'IP_FLOATING_ADDRESS',
|
|
||||||
attr_prefix + 'IP_UNIT_0_ADDRESS',
|
|
||||||
attr_prefix + 'IP_UNIT_1_ADDRESS',
|
|
||||||
network_name))
|
|
||||||
|
|
||||||
if config_type == DEFAULT_CONFIG:
|
|
||||||
if not self.start_address:
|
|
||||||
self.start_address = self.cidr[2]
|
|
||||||
if not self.end_address:
|
|
||||||
self.end_address = self.cidr[-2]
|
|
||||||
|
|
||||||
# Parse/validate the dynamic IP address allocation
|
|
||||||
if system_config.has_option(network_name,
|
|
||||||
'DYNAMIC_ALLOCATION'):
|
|
||||||
dynamic_allocation = system_config.get(network_name,
|
|
||||||
'DYNAMIC_ALLOCATION')
|
|
||||||
if dynamic_allocation.lower() == 'y':
|
|
||||||
self.dynamic_allocation = True
|
|
||||||
elif dynamic_allocation.lower() == 'n':
|
|
||||||
self.dynamic_allocation = False
|
|
||||||
else:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid DYNAMIC_ALLOCATION value of %s for %s. "
|
|
||||||
"Valid values: Y or N" %
|
|
||||||
(dynamic_allocation, network_name))
|
|
||||||
|
|
||||||
# Parse/validate the gateway (optional)
|
|
||||||
if system_config.has_option(network_name, attr_prefix + 'GATEWAY'):
|
|
||||||
gateway_address_str = system_config.get(
|
|
||||||
network_name, attr_prefix + 'GATEWAY')
|
|
||||||
try:
|
|
||||||
self.gateway_address = validate_address_str(
|
|
||||||
gateway_address_str, self.cidr)
|
|
||||||
except ValidateFail as e:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Invalid %s value of %s for %s.\nReason: %s" %
|
|
||||||
(attr_prefix + 'GATEWAY',
|
|
||||||
gateway_address_str, network_name, e))
|
|
||||||
|
|
||||||
# Parse/validate the logical interface
|
|
||||||
if logical_interface_required or system_config.has_option(
|
|
||||||
network_name, attr_prefix + 'LOGICAL_INTERFACE'):
|
|
||||||
logical_interface_name = system_config.get(
|
|
||||||
network_name, attr_prefix + 'LOGICAL_INTERFACE')
|
|
||||||
self.logical_interface = LogicalInterface()
|
|
||||||
self.logical_interface.parse_config(system_config,
|
|
||||||
logical_interface_name)
|
|
|
@ -1,10 +1,9 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2016-2019 Wind River Systems, Inc.
|
# Copyright (c) 2016-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
|
||||||
from sysinv.common import constants as sysinv_constants
|
|
||||||
from tsconfig import tsconfig
|
from tsconfig import tsconfig
|
||||||
|
|
||||||
|
|
||||||
|
@ -15,70 +14,9 @@ CONFIG_PERMDIR = tsconfig.CONFIG_PATH
|
||||||
HIERADATA_WORKDIR = '/tmp/hieradata'
|
HIERADATA_WORKDIR = '/tmp/hieradata'
|
||||||
HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata'
|
HIERADATA_PERMDIR = tsconfig.PUPPET_PATH + 'hieradata'
|
||||||
|
|
||||||
ARMADA_PERMDIR = tsconfig.ARMADA_PATH
|
|
||||||
HELM_CHARTS_PERMDIR = tsconfig.PLATFORM_PATH + '/helm_charts'
|
|
||||||
HELM_OVERRIDES_PERMDIR = tsconfig.HELM_OVERRIDES_PATH
|
|
||||||
|
|
||||||
KEYRING_WORKDIR = '/tmp/python_keyring'
|
KEYRING_WORKDIR = '/tmp/python_keyring'
|
||||||
KEYRING_PERMDIR = tsconfig.KEYRING_PATH
|
KEYRING_PERMDIR = tsconfig.KEYRING_PATH
|
||||||
|
|
||||||
INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
|
INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
|
||||||
CONFIG_FAIL_FILE = '/var/run/.config_fail'
|
|
||||||
COMMON_CERT_FILE = "/etc/ssl/private/server-cert.pem"
|
|
||||||
FIREWALL_RULES_FILE = '/etc/platform/iptables.rules'
|
|
||||||
OPENSTACK_PASSWORD_RULES_FILE = '/etc/keystone/password-rules.conf'
|
|
||||||
INSTALLATION_FAILED_FILE = '/etc/platform/installation_failed'
|
|
||||||
|
|
||||||
BACKUPS_PATH = '/opt/backups'
|
BACKUPS_PATH = '/opt/backups'
|
||||||
|
|
||||||
INTERFACES_LOG_FILE = "/tmp/configure_interfaces.log"
|
|
||||||
|
|
||||||
LINK_MTU_DEFAULT = "1500"
|
|
||||||
|
|
||||||
CINDER_LVM_THIN = "thin"
|
|
||||||
CINDER_LVM_THICK = "thick"
|
|
||||||
|
|
||||||
DEFAULT_DATABASE_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_DATABASE_STOR_SIZE
|
|
||||||
DEFAULT_SMALL_DATABASE_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
|
|
||||||
DEFAULT_SMALL_BACKUP_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
|
|
||||||
DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_VIRTUAL_DATABASE_STOR_SIZE
|
|
||||||
DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_VIRTUAL_BACKUP_STOR_SIZE
|
|
||||||
DEFAULT_EXTENSION_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_EXTENSION_STOR_SIZE
|
|
||||||
DEFAULT_PLATFORM_STOR_SIZE = \
|
|
||||||
sysinv_constants.DEFAULT_PLATFORM_STOR_SIZE
|
|
||||||
|
|
||||||
SYSTEM_CONFIG_TIMEOUT = 420
|
|
||||||
SERVICE_ENABLE_TIMEOUT = 180
|
|
||||||
MINIMUM_ROOT_DISK_SIZE = 500
|
|
||||||
MAXIMUM_CGCS_LV_SIZE = 500
|
|
||||||
LDAP_CONTROLLER_CONFIGURE_TIMEOUT = 30
|
|
||||||
SYSADMIN_MAX_PASSWORD_AGE = 45 # 45 days
|
|
||||||
|
|
||||||
LAG_MODE_ACTIVE_BACKUP = "active-backup"
|
|
||||||
LAG_MODE_BALANCE_XOR = "balance-xor"
|
|
||||||
LAG_MODE_8023AD = "802.3ad"
|
|
||||||
|
|
||||||
LAG_TXHASH_LAYER2 = "layer2"
|
|
||||||
|
|
||||||
LAG_MIIMON_FREQUENCY = 100
|
|
||||||
|
|
||||||
LOOPBACK_IFNAME = 'lo'
|
|
||||||
|
|
||||||
DEFAULT_MULTICAST_SUBNET_IPV4 = '239.1.1.0/28'
|
|
||||||
DEFAULT_MULTICAST_SUBNET_IPV6 = 'ff08::1:1:0/124'
|
|
||||||
|
|
||||||
DEFAULT_MGMT_ON_LOOPBACK_SUBNET_IPV4 = '192.168.204.0/28'
|
|
||||||
|
|
||||||
DEFAULT_REGION_NAME = "RegionOne"
|
|
||||||
DEFAULT_SERVICE_PROJECT_NAME = "services"
|
|
||||||
|
|
||||||
SSH_WARNING_MESSAGE = "WARNING: Command should only be run from the " \
|
|
||||||
"console. Continuing with this terminal may cause " \
|
|
||||||
"loss of connectivity and configuration failure."
|
|
||||||
SSH_ERROR_MESSAGE = "ERROR: Command should only be run from the console."
|
|
||||||
|
|
|
@ -1,102 +0,0 @@
|
||||||
# Copyright 2011 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
Routines for URL-safe encrypting/decrypting
|
|
||||||
|
|
||||||
Cloned from git/glance/common
|
|
||||||
"""
|
|
||||||
|
|
||||||
import base64
|
|
||||||
import os
|
|
||||||
import random
|
|
||||||
|
|
||||||
from cryptography.hazmat.backends import default_backend
|
|
||||||
from cryptography.hazmat.primitives.ciphers import algorithms
|
|
||||||
from cryptography.hazmat.primitives.ciphers import Cipher
|
|
||||||
from cryptography.hazmat.primitives.ciphers import modes
|
|
||||||
from oslo_utils import encodeutils
|
|
||||||
import six
|
|
||||||
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
|
|
||||||
from six.moves import range
|
|
||||||
|
|
||||||
|
|
||||||
def urlsafe_encrypt(key, plaintext, blocksize=16):
|
|
||||||
"""Encrypts plaintext.
|
|
||||||
|
|
||||||
Resulting ciphertext will contain URL-safe characters.
|
|
||||||
If plaintext is Unicode, encode it to UTF-8 before encryption.
|
|
||||||
|
|
||||||
:param key: AES secret key
|
|
||||||
:param plaintext: Input text to be encrypted
|
|
||||||
:param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)
|
|
||||||
:returns: Resulting ciphertext
|
|
||||||
"""
|
|
||||||
|
|
||||||
def pad(text):
|
|
||||||
"""Pads text to be encrypted"""
|
|
||||||
pad_length = (blocksize - len(text) % blocksize)
|
|
||||||
# NOTE(rosmaita): I know this looks stupid, but we can't just
|
|
||||||
# use os.urandom() to get the bytes because we use char(0) as
|
|
||||||
# a delimiter
|
|
||||||
pad = b''.join(six.int2byte(random.SystemRandom().randint(1, 0xFF))
|
|
||||||
for i in range(pad_length - 1))
|
|
||||||
# We use chr(0) as a delimiter between text and padding
|
|
||||||
return text + b'\0' + pad
|
|
||||||
|
|
||||||
plaintext = encodeutils.to_utf8(plaintext)
|
|
||||||
key = encodeutils.to_utf8(key)
|
|
||||||
# random initial 16 bytes for CBC
|
|
||||||
init_vector = os.urandom(16)
|
|
||||||
backend = default_backend()
|
|
||||||
cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector),
|
|
||||||
backend=backend)
|
|
||||||
encryptor = cypher.encryptor()
|
|
||||||
padded = encryptor.update(
|
|
||||||
pad(six.binary_type(plaintext))) + encryptor.finalize()
|
|
||||||
encoded = base64.urlsafe_b64encode(init_vector + padded)
|
|
||||||
if six.PY3:
|
|
||||||
encoded = encoded.decode('ascii')
|
|
||||||
return encoded
|
|
||||||
|
|
||||||
|
|
||||||
def urlsafe_decrypt(key, ciphertext):
|
|
||||||
"""Decrypts URL-safe base64 encoded ciphertext.
|
|
||||||
|
|
||||||
On Python 3, the result is decoded from UTF-8.
|
|
||||||
|
|
||||||
:param key: AES secret key
|
|
||||||
:param ciphertext: The encrypted text to decrypt
|
|
||||||
|
|
||||||
:returns: Resulting plaintext
|
|
||||||
"""
|
|
||||||
# Cast from unicode
|
|
||||||
ciphertext = encodeutils.to_utf8(ciphertext)
|
|
||||||
key = encodeutils.to_utf8(key)
|
|
||||||
ciphertext = base64.urlsafe_b64decode(ciphertext)
|
|
||||||
backend = default_backend()
|
|
||||||
cypher = Cipher(algorithms.AES(key), modes.CBC(ciphertext[:16]),
|
|
||||||
backend=backend)
|
|
||||||
decryptor = cypher.decryptor()
|
|
||||||
padded = decryptor.update(ciphertext[16:]) + decryptor.finalize()
|
|
||||||
text = padded[:padded.rfind(b'\0')]
|
|
||||||
if six.PY3:
|
|
||||||
text = text.decode('utf-8')
|
|
||||||
return text
|
|
|
@ -1,44 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2017-2019 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
DC Manager Interactions
|
|
||||||
"""
|
|
||||||
|
|
||||||
from controllerconfig.common import log
|
|
||||||
|
|
||||||
from Crypto.Hash import MD5
|
|
||||||
from controllerconfig.common import crypt
|
|
||||||
|
|
||||||
import json
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class UserList(object):
|
|
||||||
"""
|
|
||||||
User List
|
|
||||||
"""
|
|
||||||
def __init__(self, user_data, hash_string):
|
|
||||||
# Decrypt the data using input hash_string to generate
|
|
||||||
# the key
|
|
||||||
h = MD5.new()
|
|
||||||
h.update(hash_string)
|
|
||||||
encryption_key = h.hexdigest()
|
|
||||||
user_data_decrypted = crypt.urlsafe_decrypt(encryption_key,
|
|
||||||
user_data)
|
|
||||||
|
|
||||||
self._data = json.loads(user_data_decrypted)
|
|
||||||
|
|
||||||
def get_password(self, name):
|
|
||||||
"""
|
|
||||||
Search the users for the password
|
|
||||||
"""
|
|
||||||
for user in self._data:
|
|
||||||
if user['name'] == name:
|
|
||||||
return user['password']
|
|
||||||
return None
|
|
|
@ -1,5 +1,5 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2014-2019 Wind River Systems, Inc.
|
# Copyright (c) 2014-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
@ -20,56 +20,21 @@ class ConfigError(Exception):
|
||||||
return self.message or ""
|
return self.message or ""
|
||||||
|
|
||||||
|
|
||||||
class ConfigFail(ConfigError):
|
|
||||||
"""General configuration error."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class ValidateFail(ConfigError):
|
class ValidateFail(ConfigError):
|
||||||
"""Validation of data failed."""
|
"""Validation of data failed."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class BackupFail(ConfigError):
|
|
||||||
"""Backup error."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class UpgradeFail(ConfigError):
|
class UpgradeFail(ConfigError):
|
||||||
"""Upgrade error."""
|
"""Upgrade error."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class BackupWarn(ConfigError):
|
|
||||||
"""Backup warning."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class RestoreFail(ConfigError):
|
|
||||||
"""Backup error."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class KeystoneFail(ConfigError):
|
class KeystoneFail(ConfigError):
|
||||||
"""Keystone error."""
|
"""Keystone error."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class SysInvFail(ConfigError):
|
|
||||||
"""System Inventory error."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class UserQuit(ConfigError):
|
|
||||||
"""User initiated quit operation."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class CloneFail(ConfigError):
|
|
||||||
"""Clone error."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TidyStorageFail(ConfigError):
|
class TidyStorageFail(ConfigError):
|
||||||
"""Tidy storage error."""
|
"""Tidy storage error."""
|
||||||
pass
|
pass
|
||||||
|
|
|
@ -12,10 +12,9 @@ import datetime
|
||||||
import iso8601
|
import iso8601
|
||||||
|
|
||||||
from controllerconfig.common.exceptions import KeystoneFail
|
from controllerconfig.common.exceptions import KeystoneFail
|
||||||
from controllerconfig.common import log
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class Token(object):
|
class Token(object):
|
||||||
|
|
|
@ -1,49 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2014 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
Logging
|
|
||||||
"""
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import logging.handlers
|
|
||||||
|
|
||||||
_loggers = {}
|
|
||||||
|
|
||||||
|
|
||||||
def get_logger(name):
|
|
||||||
""" Get a logger or create one """
|
|
||||||
|
|
||||||
if name not in _loggers:
|
|
||||||
_loggers[name] = logging.getLogger(name)
|
|
||||||
|
|
||||||
return _loggers[name]
|
|
||||||
|
|
||||||
|
|
||||||
def setup_logger(logger):
|
|
||||||
""" Setup a logger """
|
|
||||||
|
|
||||||
# Send logs to /var/log/platform.log
|
|
||||||
syslog_facility = logging.handlers.SysLogHandler.LOG_LOCAL1
|
|
||||||
|
|
||||||
formatter = logging.Formatter("configassistant[%(process)d] " +
|
|
||||||
"%(pathname)s:%(lineno)s " +
|
|
||||||
"%(levelname)8s [%(name)s] %(message)s")
|
|
||||||
|
|
||||||
handler = logging.handlers.SysLogHandler(address='/dev/log',
|
|
||||||
facility=syslog_facility)
|
|
||||||
handler.setLevel(logging.INFO)
|
|
||||||
handler.setFormatter(formatter)
|
|
||||||
|
|
||||||
logger.addHandler(handler)
|
|
||||||
logger.setLevel(logging.INFO)
|
|
||||||
|
|
||||||
|
|
||||||
def configure():
|
|
||||||
""" Setup logging """
|
|
||||||
|
|
||||||
for logger in _loggers:
|
|
||||||
setup_logger(_loggers[logger])
|
|
|
@ -1,5 +1,5 @@
|
||||||
"""
|
"""
|
||||||
Copyright (c) 2015-2017 Wind River Systems, Inc.
|
Copyright (c) 2015-2020 Wind River Systems, Inc.
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
@ -7,16 +7,15 @@ SPDX-License-Identifier: Apache-2.0
|
||||||
import json
|
import json
|
||||||
|
|
||||||
from controllerconfig.common.exceptions import KeystoneFail
|
from controllerconfig.common.exceptions import KeystoneFail
|
||||||
from controllerconfig.common import dcmanager
|
|
||||||
from controllerconfig.common import keystone
|
from controllerconfig.common import keystone
|
||||||
from controllerconfig.common import log
|
|
||||||
from six.moves import http_client as httplib
|
from six.moves import http_client as httplib
|
||||||
from six.moves.urllib import request as urlrequest
|
from six.moves.urllib import request as urlrequest
|
||||||
from six.moves.urllib.error import HTTPError
|
from six.moves.urllib.error import HTTPError
|
||||||
from six.moves.urllib.error import URLError
|
from six.moves.urllib.error import URLError
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def rest_api_request(token, method, api_cmd, api_cmd_headers=None,
|
def rest_api_request(token, method, api_cmd, api_cmd_headers=None,
|
||||||
|
@ -324,16 +323,3 @@ def delete_project(token, api_url, id):
|
||||||
api_cmd = api_url + "/projects/" + id
|
api_cmd = api_url + "/projects/" + id
|
||||||
response = rest_api_request(token, "DELETE", api_cmd,)
|
response = rest_api_request(token, "DELETE", api_cmd,)
|
||||||
return keystone.Project(response)
|
return keystone.Project(response)
|
||||||
|
|
||||||
|
|
||||||
def get_subcloud_config(token, api_url, subcloud_name,
|
|
||||||
hash_string):
|
|
||||||
"""
|
|
||||||
Ask DC Manager for our subcloud configuration
|
|
||||||
"""
|
|
||||||
api_cmd = api_url + "/subclouds/" + subcloud_name + "/config"
|
|
||||||
response = rest_api_request(token, "GET", api_cmd)
|
|
||||||
config = dict()
|
|
||||||
config['users'] = dcmanager.UserList(response['users'], hash_string)
|
|
||||||
|
|
||||||
return config
|
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -1,285 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2014-2015 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
OpenStack
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common.exceptions import SysInvFail
|
|
||||||
from controllerconfig.common.rest_api_utils import get_token
|
|
||||||
from controllerconfig import sysinv_api as sysinv
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
|
|
||||||
KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry
|
|
||||||
|
|
||||||
|
|
||||||
class OpenStack(object):
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.admin_token = None
|
|
||||||
self.conf = {}
|
|
||||||
self._sysinv = None
|
|
||||||
|
|
||||||
source_command = 'source /etc/platform/openrc && env'
|
|
||||||
|
|
||||||
with open(os.devnull, "w") as fnull:
|
|
||||||
proc = subprocess.Popen(
|
|
||||||
['bash', '-c', source_command],
|
|
||||||
stdout=subprocess.PIPE, stderr=fnull)
|
|
||||||
|
|
||||||
for line in proc.stdout:
|
|
||||||
key, _, value = line.partition("=")
|
|
||||||
if key == 'OS_USERNAME':
|
|
||||||
self.conf['admin_user'] = value.strip()
|
|
||||||
elif key == 'OS_PASSWORD':
|
|
||||||
self.conf['admin_pwd'] = value.strip()
|
|
||||||
elif key == 'OS_PROJECT_NAME':
|
|
||||||
self.conf['admin_tenant'] = value.strip()
|
|
||||||
elif key == 'OS_AUTH_URL':
|
|
||||||
self.conf['auth_url'] = value.strip()
|
|
||||||
elif key == 'OS_REGION_NAME':
|
|
||||||
self.conf['region_name'] = value.strip()
|
|
||||||
elif key == 'OS_USER_DOMAIN_NAME':
|
|
||||||
self.conf['user_domain'] = value.strip()
|
|
||||||
elif key == 'OS_PROJECT_DOMAIN_NAME':
|
|
||||||
self.conf['project_domain'] = value.strip()
|
|
||||||
|
|
||||||
proc.communicate()
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
if not self._connect():
|
|
||||||
raise Exception('Failed to connect')
|
|
||||||
return self
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
||||||
self._disconnect()
|
|
||||||
|
|
||||||
def __del__(self):
|
|
||||||
self._disconnect()
|
|
||||||
|
|
||||||
def _connect(self):
|
|
||||||
""" Connect to an OpenStack instance """
|
|
||||||
|
|
||||||
if self.admin_token is not None:
|
|
||||||
self._disconnect()
|
|
||||||
|
|
||||||
# Try to obtain an admin token from keystone
|
|
||||||
for _ in range(KEYSTONE_AUTH_SERVER_RETRY_CNT):
|
|
||||||
self.admin_token = get_token(self.conf['auth_url'],
|
|
||||||
self.conf['admin_tenant'],
|
|
||||||
self.conf['admin_user'],
|
|
||||||
self.conf['admin_pwd'],
|
|
||||||
self.conf['user_domain'],
|
|
||||||
self.conf['project_domain'])
|
|
||||||
if self.admin_token:
|
|
||||||
break
|
|
||||||
time.sleep(KEYSTONE_AUTH_SERVER_WAIT)
|
|
||||||
|
|
||||||
return self.admin_token is not None
|
|
||||||
|
|
||||||
def _disconnect(self):
|
|
||||||
""" Disconnect from an OpenStack instance """
|
|
||||||
self.admin_token = None
|
|
||||||
|
|
||||||
def lock_hosts(self, exempt_hostnames=None, progress_callback=None,
|
|
||||||
timeout=60):
|
|
||||||
""" Lock hosts of an OpenStack instance except for host names
|
|
||||||
in the exempt list
|
|
||||||
"""
|
|
||||||
failed_hostnames = []
|
|
||||||
|
|
||||||
if exempt_hostnames is None:
|
|
||||||
exempt_hostnames = []
|
|
||||||
|
|
||||||
hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])
|
|
||||||
if not hosts:
|
|
||||||
if progress_callback is not None:
|
|
||||||
progress_callback(0, 0, None, None)
|
|
||||||
return
|
|
||||||
|
|
||||||
wait = False
|
|
||||||
host_i = 0
|
|
||||||
|
|
||||||
for host in hosts:
|
|
||||||
if host.name in exempt_hostnames:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if host.is_unlocked():
|
|
||||||
if not host.force_lock(self.admin_token,
|
|
||||||
self.conf['region_name']):
|
|
||||||
failed_hostnames.append(host.name)
|
|
||||||
LOG.warning("Could not lock %s" % host.name)
|
|
||||||
else:
|
|
||||||
wait = True
|
|
||||||
else:
|
|
||||||
host_i += 1
|
|
||||||
if progress_callback is not None:
|
|
||||||
progress_callback(len(hosts), host_i,
|
|
||||||
('locking %s' % host.name),
|
|
||||||
'DONE')
|
|
||||||
|
|
||||||
if wait and timeout > 5:
|
|
||||||
time.sleep(5)
|
|
||||||
timeout -= 5
|
|
||||||
|
|
||||||
for _ in range(0, timeout):
|
|
||||||
wait = False
|
|
||||||
|
|
||||||
for host in hosts:
|
|
||||||
if host.name in exempt_hostnames:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if (host.name not in failed_hostnames) and host.is_unlocked():
|
|
||||||
host.refresh_data(self.admin_token,
|
|
||||||
self.conf['region_name'])
|
|
||||||
|
|
||||||
if host.is_locked():
|
|
||||||
LOG.info("Locked %s" % host.name)
|
|
||||||
host_i += 1
|
|
||||||
if progress_callback is not None:
|
|
||||||
progress_callback(len(hosts), host_i,
|
|
||||||
('locking %s' % host.name),
|
|
||||||
'DONE')
|
|
||||||
else:
|
|
||||||
LOG.info("Waiting for lock of %s" % host.name)
|
|
||||||
wait = True
|
|
||||||
|
|
||||||
if not wait:
|
|
||||||
break
|
|
||||||
|
|
||||||
time.sleep(1)
|
|
||||||
else:
|
|
||||||
failed_hostnames.append(host.name)
|
|
||||||
LOG.warning("Wait failed for lock of %s" % host.name)
|
|
||||||
|
|
||||||
return failed_hostnames
|
|
||||||
|
|
||||||
def power_off_hosts(self, exempt_hostnames=None, progress_callback=None,
|
|
||||||
timeout=60):
|
|
||||||
""" Power-off hosts of an OpenStack instance except for host names
|
|
||||||
in the exempt list
|
|
||||||
"""
|
|
||||||
|
|
||||||
if exempt_hostnames is None:
|
|
||||||
exempt_hostnames = []
|
|
||||||
|
|
||||||
hosts = sysinv.get_hosts(self.admin_token, self.conf['region_name'])
|
|
||||||
|
|
||||||
hosts[:] = [host for host in hosts if host.support_power_off()]
|
|
||||||
if not hosts:
|
|
||||||
if progress_callback is not None:
|
|
||||||
progress_callback(0, 0, None, None)
|
|
||||||
return
|
|
||||||
|
|
||||||
wait = False
|
|
||||||
host_i = 0
|
|
||||||
|
|
||||||
for host in hosts:
|
|
||||||
if host.name in exempt_hostnames:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if host.is_powered_on():
|
|
||||||
if not host.power_off(self.admin_token,
|
|
||||||
self.conf['region_name']):
|
|
||||||
raise SysInvFail("Could not power-off %s" % host.name)
|
|
||||||
wait = True
|
|
||||||
else:
|
|
||||||
host_i += 1
|
|
||||||
if progress_callback is not None:
|
|
||||||
progress_callback(len(hosts), host_i,
|
|
||||||
('powering off %s' % host.name),
|
|
||||||
'DONE')
|
|
||||||
|
|
||||||
if wait and timeout > 5:
|
|
||||||
time.sleep(5)
|
|
||||||
timeout -= 5
|
|
||||||
|
|
||||||
for _ in range(0, timeout):
|
|
||||||
wait = False
|
|
||||||
|
|
||||||
for host in hosts:
|
|
||||||
if host.name in exempt_hostnames:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if host.is_powered_on():
|
|
||||||
host.refresh_data(self.admin_token,
|
|
||||||
self.conf['region_name'])
|
|
||||||
|
|
||||||
if host.is_powered_off():
|
|
||||||
LOG.info("Powered-Off %s" % host.name)
|
|
||||||
host_i += 1
|
|
||||||
if progress_callback is not None:
|
|
||||||
progress_callback(len(hosts), host_i,
|
|
||||||
('powering off %s' % host.name),
|
|
||||||
'DONE')
|
|
||||||
else:
|
|
||||||
LOG.info("Waiting for power-off of %s" % host.name)
|
|
||||||
wait = True
|
|
||||||
|
|
||||||
if not wait:
|
|
||||||
break
|
|
||||||
|
|
||||||
time.sleep(1)
|
|
||||||
else:
|
|
||||||
failed_hosts = [h.name for h in hosts if h.is_powered_on()]
|
|
||||||
msg = "Wait timeout for power-off of %s" % failed_hosts
|
|
||||||
LOG.info(msg)
|
|
||||||
raise SysInvFail(msg)
|
|
||||||
|
|
||||||
def wait_for_hosts_disabled(self, exempt_hostnames=None, timeout=300,
|
|
||||||
interval_step=10):
|
|
||||||
"""Wait for hosts to be identified as disabled.
|
|
||||||
Run check every interval_step seconds
|
|
||||||
"""
|
|
||||||
if exempt_hostnames is None:
|
|
||||||
exempt_hostnames = []
|
|
||||||
|
|
||||||
for _ in range(timeout / interval_step):
|
|
||||||
hosts = sysinv.get_hosts(self.admin_token,
|
|
||||||
self.conf['region_name'])
|
|
||||||
if not hosts:
|
|
||||||
time.sleep(interval_step)
|
|
||||||
continue
|
|
||||||
|
|
||||||
for host in hosts:
|
|
||||||
if host.name in exempt_hostnames:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if host.is_enabled():
|
|
||||||
LOG.info("host %s is still enabled" % host.name)
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
LOG.info("all hosts disabled.")
|
|
||||||
return True
|
|
||||||
|
|
||||||
time.sleep(interval_step)
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
@property
|
|
||||||
def sysinv(self):
|
|
||||||
if self._sysinv is None:
|
|
||||||
# TOX cannot import cgts_client and all the dependencies therefore
|
|
||||||
# the client is being lazy loaded since TOX doesn't actually
|
|
||||||
# require the cgtsclient module.
|
|
||||||
from cgtsclient import client as cgts_client
|
|
||||||
|
|
||||||
endpoint = self.admin_token.get_service_url(
|
|
||||||
self.conf['region_name'], "sysinv", "platform", 'admin')
|
|
||||||
self._sysinv = cgts_client.Client(
|
|
||||||
sysinv.API_VERSION,
|
|
||||||
endpoint=endpoint,
|
|
||||||
token=self.admin_token.get_id())
|
|
||||||
|
|
||||||
return self._sysinv
|
|
|
@ -1,31 +0,0 @@
|
||||||
import sys
|
|
||||||
|
|
||||||
from controllerconfig.common import log
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class ProgressRunner(object):
|
|
||||||
steps = []
|
|
||||||
|
|
||||||
def add(self, action, message):
|
|
||||||
self.steps.append((action, message))
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
total = len(self.steps)
|
|
||||||
for i, step in enumerate(self.steps, start=1):
|
|
||||||
action, message = step
|
|
||||||
LOG.info("Start step: %s" % message)
|
|
||||||
sys.stdout.write(
|
|
||||||
"\n%.2u/%.2u: %s ... " % (i, total, message))
|
|
||||||
sys.stdout.flush()
|
|
||||||
try:
|
|
||||||
action()
|
|
||||||
sys.stdout.write('DONE')
|
|
||||||
sys.stdout.flush()
|
|
||||||
except Exception:
|
|
||||||
sys.stdout.flush()
|
|
||||||
raise
|
|
||||||
LOG.info("Finish step: %s" % message)
|
|
||||||
sys.stdout.write("\n")
|
|
||||||
sys.stdout.flush()
|
|
|
@ -1,629 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright (c) 2015-2019 Wind River Systems, Inc.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
from six.moves import configparser
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import textwrap
|
|
||||||
import time
|
|
||||||
from controllerconfig import utils
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from controllerconfig.common import constants
|
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common import rest_api_utils as rutils
|
|
||||||
from controllerconfig.common.exceptions import KeystoneFail
|
|
||||||
from controllerconfig.common.configobjects import REGION_CONFIG
|
|
||||||
from controllerconfig.common.configobjects import SUBCLOUD_CONFIG
|
|
||||||
from controllerconfig import ConfigFail
|
|
||||||
from controllerconfig.configassistant import ConfigAssistant
|
|
||||||
from controllerconfig.systemconfig import parse_system_config
|
|
||||||
from controllerconfig.systemconfig import configure_management_interface
|
|
||||||
from controllerconfig.systemconfig import create_cgcs_config_file
|
|
||||||
from controllerconfig import DEFAULT_DOMAIN_NAME
|
|
||||||
|
|
||||||
# Temporary file for building cgcs_config
|
|
||||||
TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config"
|
|
||||||
|
|
||||||
# For region mode, this is the list of users that we expect to find configured
|
|
||||||
# in the region config file as <USER>_USER_KEY and <USER>_PASSWORD.
|
|
||||||
# For distributed cloud, this is the list of users that we expect to find
|
|
||||||
# configured in keystone. The password for each user will be retrieved from
|
|
||||||
# the DC Manager in the system controller and added to the region config file.
|
|
||||||
# The format is:
|
|
||||||
# REGION_NAME = key in region config file for this user's region
|
|
||||||
# USER_KEY = key in region config file for this user's name
|
|
||||||
# USER_NAME = user name in keystone
|
|
||||||
|
|
||||||
REGION_NAME = 0
|
|
||||||
USER_KEY = 1
|
|
||||||
USER_NAME = 2
|
|
||||||
|
|
||||||
EXPECTED_USERS = [
|
|
||||||
('REGION_2_SERVICES', 'SYSINV', 'sysinv'),
|
|
||||||
('REGION_2_SERVICES', 'PATCHING', 'patching'),
|
|
||||||
('REGION_2_SERVICES', 'NFV', 'vim'),
|
|
||||||
('REGION_2_SERVICES', 'MTCE', 'mtce'),
|
|
||||||
('REGION_2_SERVICES', 'FM', 'fm'),
|
|
||||||
('REGION_2_SERVICES', 'BARBICAN', 'barbican')]
|
|
||||||
|
|
||||||
# This a description of the region 2 endpoints that we expect to configure or
|
|
||||||
# find configured in keystone. The format is as follows:
|
|
||||||
# SERVICE_NAME = key in region config file for this service's name
|
|
||||||
# SERVICE_TYPE = key in region config file for this service's type
|
|
||||||
# PUBLIC_URL = required publicurl - {} is replaced with CAM floating IP
|
|
||||||
# INTERNAL_URL = required internalurl - {} is replaced with CLM floating IP
|
|
||||||
# ADMIN_URL = required adminurl - {} is replaced with CLM floating IP
|
|
||||||
# DESCRIPTION = Description of the service (for automatic configuration)
|
|
||||||
|
|
||||||
SERVICE_NAME = 0
|
|
||||||
SERVICE_TYPE = 1
|
|
||||||
PUBLIC_URL = 2
|
|
||||||
INTERNAL_URL = 3
|
|
||||||
ADMIN_URL = 4
|
|
||||||
DESCRIPTION = 5
|
|
||||||
|
|
||||||
EXPECTED_REGION2_ENDPOINTS = [
|
|
||||||
('SYSINV_SERVICE_NAME', 'SYSINV_SERVICE_TYPE',
|
|
||||||
'http://{}:6385/v1',
|
|
||||||
'http://{}:6385/v1',
|
|
||||||
'http://{}:6385/v1',
|
|
||||||
'SysInv Service'),
|
|
||||||
('PATCHING_SERVICE_NAME', 'PATCHING_SERVICE_TYPE',
|
|
||||||
'http://{}:15491',
|
|
||||||
'http://{}:5491',
|
|
||||||
'http://{}:5491',
|
|
||||||
'Patching Service'),
|
|
||||||
('NFV_SERVICE_NAME', 'NFV_SERVICE_TYPE',
|
|
||||||
'http://{}:4545',
|
|
||||||
'http://{}:4545',
|
|
||||||
'http://{}:4545',
|
|
||||||
'Virtual Infrastructure Manager'),
|
|
||||||
('FM_SERVICE_NAME', 'FM_SERVICE_TYPE',
|
|
||||||
'http://{}:18002',
|
|
||||||
'http://{}:18002',
|
|
||||||
'http://{}:18002',
|
|
||||||
'Fault Management Service'),
|
|
||||||
('BARBICAN_SERVICE_NAME', 'BARBICAN_SERVICE_TYPE',
|
|
||||||
'http://{}:9311',
|
|
||||||
'http://{}:9311',
|
|
||||||
'http://{}:9311',
|
|
||||||
'OpenStack Key Manager Service'),
|
|
||||||
]
|
|
||||||
|
|
||||||
EXPECTED_KEYSTONE_ENDPOINT = (
|
|
||||||
'KEYSTONE_SERVICE_NAME', 'KEYSTONE_SERVICE_TYPE',
|
|
||||||
'http://{}:8081/keystone/main/v2.0',
|
|
||||||
'http://{}:8081/keystone/main/v2.0',
|
|
||||||
'http://{}:8081/keystone/admin/v2.0',
|
|
||||||
'OpenStack Identity')
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def validate_region_one_keystone_config(region_config, token, api_url, users,
|
|
||||||
services, endpoints, create=False,
|
|
||||||
config_type=REGION_CONFIG,
|
|
||||||
user_config=None):
|
|
||||||
""" Validate that the required region one configuration are in place,
|
|
||||||
if create is True, any missing entries will be set up to be added
|
|
||||||
to keystone later on by puppet.
|
|
||||||
"""
|
|
||||||
|
|
||||||
region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
|
|
||||||
region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME')
|
|
||||||
|
|
||||||
# Determine what keystone entries are expected
|
|
||||||
expected_users = EXPECTED_USERS
|
|
||||||
expected_region_2_endpoints = EXPECTED_REGION2_ENDPOINTS
|
|
||||||
# Keystone is always in region 1
|
|
||||||
expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT]
|
|
||||||
|
|
||||||
domains = rutils.get_domains(token, api_url)
|
|
||||||
# Verify service project domain, creating if necessary
|
|
||||||
if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'):
|
|
||||||
project_domain = region_config.get('REGION_2_SERVICES',
|
|
||||||
'PROJECT_DOMAIN_NAME')
|
|
||||||
else:
|
|
||||||
project_domain = DEFAULT_DOMAIN_NAME
|
|
||||||
project_domain_id = domains.get_domain_id(project_domain)
|
|
||||||
if not project_domain_id:
|
|
||||||
if create and config_type == REGION_CONFIG:
|
|
||||||
region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME',
|
|
||||||
project_domain)
|
|
||||||
else:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Keystone configuration error: service project domain '%s' is "
|
|
||||||
"not configured." % project_domain)
|
|
||||||
|
|
||||||
# Verify service project, creating if necessary
|
|
||||||
if region_config.has_option('SHARED_SERVICES',
|
|
||||||
'SERVICE_PROJECT_NAME'):
|
|
||||||
service_project = region_config.get('SHARED_SERVICES',
|
|
||||||
'SERVICE_PROJECT_NAME')
|
|
||||||
else:
|
|
||||||
service_project = region_config.get('SHARED_SERVICES',
|
|
||||||
'SERVICE_TENANT_NAME')
|
|
||||||
projects = rutils.get_projects(token, api_url)
|
|
||||||
project_id = projects.get_project_id(service_project)
|
|
||||||
if not project_id:
|
|
||||||
if create and config_type == REGION_CONFIG:
|
|
||||||
region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME',
|
|
||||||
service_project)
|
|
||||||
else:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Keystone configuration error: service project '%s' is not "
|
|
||||||
"configured." % service_project)
|
|
||||||
|
|
||||||
# Verify and retrieve the id of the admin role (only needed when creating)
|
|
||||||
roles = rutils.get_roles(token, api_url)
|
|
||||||
role_id = roles.get_role_id('admin')
|
|
||||||
if not role_id and create:
|
|
||||||
raise ConfigFail("Keystone configuration error: No admin role present")
|
|
||||||
|
|
||||||
# verify that the service user domain is configured, creating if necessary
|
|
||||||
if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'):
|
|
||||||
user_domain = region_config.get('REGION_2_SERVICES',
|
|
||||||
'USER_DOMAIN_NAME')
|
|
||||||
else:
|
|
||||||
user_domain = DEFAULT_DOMAIN_NAME
|
|
||||||
domains = rutils.get_domains(token, api_url)
|
|
||||||
user_domain_id = domains.get_domain_id(user_domain)
|
|
||||||
if not user_domain_id:
|
|
||||||
if create and config_type == REGION_CONFIG:
|
|
||||||
region_config.set('REGION_2_SERVICES',
|
|
||||||
'USER_DOMAIN_NAME')
|
|
||||||
else:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Unable to obtain id for for %s domain. Please ensure "
|
|
||||||
"keystone configuration is correct." % user_domain)
|
|
||||||
|
|
||||||
auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
|
|
||||||
if config_type == REGION_CONFIG:
|
|
||||||
# Verify that all users are configured and can retrieve a token,
|
|
||||||
# Optionally set up to create missing users + their admin role
|
|
||||||
for user in expected_users:
|
|
||||||
auth_user = region_config.get(user[REGION_NAME],
|
|
||||||
user[USER_KEY] + '_USER_NAME')
|
|
||||||
user_id = users.get_user_id(auth_user)
|
|
||||||
auth_password = None
|
|
||||||
if not user_id and create:
|
|
||||||
if not region_config.has_option(
|
|
||||||
user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
|
|
||||||
# Generate random password for new user via
|
|
||||||
# /dev/urandom if necessary
|
|
||||||
try:
|
|
||||||
region_config.set(
|
|
||||||
user[REGION_NAME], user[USER_KEY] + '_PASSWORD',
|
|
||||||
uuid.uuid4().hex[:10] + "TiC2*")
|
|
||||||
except Exception as e:
|
|
||||||
raise ConfigFail("Failed to generate random user "
|
|
||||||
"password: %s" % e)
|
|
||||||
elif user_id and user_domain_id and\
|
|
||||||
project_id and project_domain_id:
|
|
||||||
# If there is a user_id existing then we cannot use
|
|
||||||
# a randomized password as it was either created by
|
|
||||||
# a previous run of regionconfig or was created as
|
|
||||||
# part of Titanium Cloud Primary region config
|
|
||||||
if not region_config.has_option(
|
|
||||||
user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
|
|
||||||
raise ConfigFail("Failed to find configured password "
|
|
||||||
"for pre-defined user %s" % auth_user)
|
|
||||||
auth_password = region_config.get(user[REGION_NAME],
|
|
||||||
user[USER_KEY] + '_PASSWORD')
|
|
||||||
# Verify that the existing user can seek an auth token
|
|
||||||
user_token = rutils.get_token(auth_url, service_project,
|
|
||||||
auth_user,
|
|
||||||
auth_password, user_domain,
|
|
||||||
project_domain)
|
|
||||||
if not user_token:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Unable to obtain keystone token for %s user. "
|
|
||||||
"Please ensure keystone configuration is correct."
|
|
||||||
% auth_user)
|
|
||||||
else:
|
|
||||||
# For subcloud configs we re-use the users from the system controller
|
|
||||||
# (the primary region).
|
|
||||||
for user in expected_users:
|
|
||||||
auth_user = user[USER_NAME]
|
|
||||||
user_id = users.get_user_id(auth_user)
|
|
||||||
auth_password = None
|
|
||||||
|
|
||||||
if user_id:
|
|
||||||
# Add the password to the region config so it will be used when
|
|
||||||
# configuring services.
|
|
||||||
auth_password = user_config.get_password(user[USER_NAME])
|
|
||||||
region_config.set(user[REGION_NAME],
|
|
||||||
user[USER_KEY] + '_PASSWORD',
|
|
||||||
auth_password)
|
|
||||||
else:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Unable to obtain user (%s). Please ensure "
|
|
||||||
"keystone configuration is correct." % user[USER_NAME])
|
|
||||||
|
|
||||||
# Verify that the existing user can seek an auth token
|
|
||||||
user_token = rutils.get_token(auth_url, service_project, auth_user,
|
|
||||||
auth_password, user_domain,
|
|
||||||
project_domain)
|
|
||||||
if not user_token:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Unable to obtain keystone token for %s user. "
|
|
||||||
"Please ensure keystone configuration is correct." %
|
|
||||||
auth_user)
|
|
||||||
|
|
||||||
# Verify that region two endpoints & services for shared services
|
|
||||||
# match our requirements, optionally creating missing entries
|
|
||||||
for endpoint in expected_region_1_endpoints:
|
|
||||||
service_name = region_config.get('SHARED_SERVICES',
|
|
||||||
endpoint[SERVICE_NAME])
|
|
||||||
service_type = region_config.get('SHARED_SERVICES',
|
|
||||||
endpoint[SERVICE_TYPE])
|
|
||||||
|
|
||||||
try:
|
|
||||||
service_id = services.get_service_id(service_name, service_type)
|
|
||||||
except KeystoneFail as ex:
|
|
||||||
# No option to create services for region one, if those are not
|
|
||||||
# present, something is seriously wrong
|
|
||||||
raise ex
|
|
||||||
|
|
||||||
# Extract region one url information from the existing endpoint entry:
|
|
||||||
try:
|
|
||||||
endpoints.get_service_url(
|
|
||||||
region_1_name, service_id, "public")
|
|
||||||
endpoints.get_service_url(
|
|
||||||
region_1_name, service_id, "internal")
|
|
||||||
endpoints.get_service_url(
|
|
||||||
region_1_name, service_id, "admin")
|
|
||||||
except KeystoneFail as ex:
|
|
||||||
# Fail since shared services endpoints are not found
|
|
||||||
raise ConfigFail("Endpoint for shared service %s "
|
|
||||||
"is not configured" % service_name)
|
|
||||||
|
|
||||||
# Verify that region two endpoints & services match our requirements,
|
|
||||||
# optionally creating missing entries
|
|
||||||
public_address = utils.get_optional(region_config, 'CAN_NETWORK',
|
|
||||||
'CAN_IP_START_ADDRESS')
|
|
||||||
if not public_address:
|
|
||||||
public_address = utils.get_optional(region_config, 'CAN_NETWORK',
|
|
||||||
'CAN_IP_FLOATING_ADDRESS')
|
|
||||||
if not public_address:
|
|
||||||
public_address = utils.get_optional(region_config, 'OAM_NETWORK',
|
|
||||||
'IP_START_ADDRESS')
|
|
||||||
if not public_address:
|
|
||||||
# AIO-SX configuration
|
|
||||||
public_address = utils.get_optional(region_config, 'OAM_NETWORK',
|
|
||||||
'IP_ADDRESS')
|
|
||||||
if not public_address:
|
|
||||||
public_address = region_config.get('OAM_NETWORK',
|
|
||||||
'IP_FLOATING_ADDRESS')
|
|
||||||
|
|
||||||
if region_config.has_section('CLM_NETWORK'):
|
|
||||||
internal_address = region_config.get('CLM_NETWORK',
|
|
||||||
'CLM_IP_START_ADDRESS')
|
|
||||||
else:
|
|
||||||
internal_address = region_config.get('MGMT_NETWORK',
|
|
||||||
'IP_START_ADDRESS')
|
|
||||||
|
|
||||||
for endpoint in expected_region_2_endpoints:
|
|
||||||
service_name = utils.get_service(region_config, 'REGION_2_SERVICES',
|
|
||||||
endpoint[SERVICE_NAME])
|
|
||||||
service_type = utils.get_service(region_config, 'REGION_2_SERVICES',
|
|
||||||
endpoint[SERVICE_TYPE])
|
|
||||||
service_id = services.get_service_id(service_name, service_type)
|
|
||||||
|
|
||||||
expected_public_url = endpoint[PUBLIC_URL].format(public_address)
|
|
||||||
|
|
||||||
expected_internal_url = endpoint[INTERNAL_URL].format(internal_address)
|
|
||||||
expected_admin_url = endpoint[ADMIN_URL].format(internal_address)
|
|
||||||
|
|
||||||
try:
|
|
||||||
public_url = endpoints.get_service_url(region_2_name, service_id,
|
|
||||||
"public")
|
|
||||||
internal_url = endpoints.get_service_url(region_2_name, service_id,
|
|
||||||
"internal")
|
|
||||||
admin_url = endpoints.get_service_url(region_2_name, service_id,
|
|
||||||
"admin")
|
|
||||||
except KeystoneFail as ex:
|
|
||||||
# The endpoint will be created optionally
|
|
||||||
if not create:
|
|
||||||
raise ConfigFail("Keystone configuration error: Unable to "
|
|
||||||
"find endpoints for service %s"
|
|
||||||
% service_name)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Validate the existing endpoints
|
|
||||||
for endpointtype, found, expected in [
|
|
||||||
('public', public_url, expected_public_url),
|
|
||||||
('internal', internal_url, expected_internal_url),
|
|
||||||
('admin', admin_url, expected_admin_url)]:
|
|
||||||
if found != expected:
|
|
||||||
raise ConfigFail(
|
|
||||||
"Keystone configuration error for:\nregion ({}), "
|
|
||||||
"service name ({}), service type ({})\n"
|
|
||||||
"expected {}: {}\nconfigured {}: {}".format(
|
|
||||||
region_2_name, service_name, service_type,
|
|
||||||
endpointtype, expected, endpointtype, found))
|
|
||||||
|
|
||||||
|
|
||||||
def validate_region_one_ldap_config(region_config):
|
|
||||||
"""Validate ldap on region one by a ldap search"""
|
|
||||||
|
|
||||||
ldapserver_uri = region_config.get('SHARED_SERVICES', 'LDAP_SERVICE_URL')
|
|
||||||
cmd = ["ldapsearch", "-xH", ldapserver_uri,
|
|
||||||
"-b", "dc=cgcs,dc=local", "(objectclass=*)"]
|
|
||||||
try:
|
|
||||||
with open(os.devnull, "w") as fnull:
|
|
||||||
subprocess.check_call(cmd, stdout=fnull, stderr=fnull)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
raise ConfigFail("LDAP configuration error: not accessible")
|
|
||||||
|
|
||||||
|
|
||||||
def set_subcloud_config_defaults(region_config):
    """Apply the subcloud-specific defaults to *region_config*.

    Subclouds always create their own endpoints, use the default service
    project, and inherit the expected service users from the system
    controller so the validation code can run against them.
    """
    # Endpoints are always created for subclouds.
    region_config.set('REGION_2_SERVICES', 'CREATE', 'Y')

    # Subclouds use the default service project.
    region_config.set('SHARED_SERVICES', 'SERVICE_PROJECT_NAME',
                      constants.DEFAULT_SERVICE_PROJECT_NAME)

    # Seed the expected service users into the config so validation
    # passes; the services are later configured to use the users from
    # the system controller.
    for user in EXPECTED_USERS:
        option_name = user[USER_KEY] + '_USER_NAME'
        region_config.set(user[REGION_NAME], option_name, user[USER_NAME])
|
||||||
def configure_region(config_file, config_type=REGION_CONFIG):
    """Configure this region (or subcloud) from *config_file*.

    Steps, in order:
      1. parse and validate the region/subcloud config file
      2. bring up the management interface so region one is reachable
      3. obtain a keystone token (retrying ~10 times, 10s apart)
      4. fetch services/endpoints/users/domains from keystone
      5. (subcloud only) fetch the user config from dcmanager
      6. prepare or validate the region-one keystone entries
      7. validate the shared LDAP server, if one is configured
      8. write the cgcs_config apply file and run ConfigAssistant

    Raises:
        ConfigFail: on any parse, validation or connectivity failure.
    """

    # Parse the region/subcloud config file
    print("Parsing configuration file... ", end=' ')
    region_config = parse_system_config(config_file)
    print("DONE")

    if config_type == SUBCLOUD_CONFIG:
        # Set defaults in region_config for subclouds
        set_subcloud_config_defaults(region_config)

    # Validate the region/subcloud config file (validate_only avoids
    # writing anything at this stage).
    print("Validating configuration file... ", end=' ')
    try:
        create_cgcs_config_file(None, region_config, None, None, None,
                                config_type=config_type,
                                validate_only=True)
    except configparser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print("DONE")

    # Bring up management interface to allow us to reach Region 1
    print("Configuring management interface... ", end=' ')
    configure_management_interface(region_config, config_type=config_type)
    print("DONE")

    # Get token from keystone
    print("Retrieving keystone token...", end=' ')
    sys.stdout.flush()
    auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
    # ADMIN_TENANT_NAME is the legacy spelling of ADMIN_PROJECT_NAME;
    # accept either.
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'):
        auth_project = region_config.get('SHARED_SERVICES',
                                         'ADMIN_TENANT_NAME')
    else:
        auth_project = region_config.get('SHARED_SERVICES',
                                         'ADMIN_PROJECT_NAME')
    auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME')
    auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD')
    # Domains fall back to the keystone default when not configured.
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'):
        admin_user_domain = region_config.get('SHARED_SERVICES',
                                              'ADMIN_USER_DOMAIN')
    else:
        admin_user_domain = DEFAULT_DOMAIN_NAME
    if region_config.has_option('SHARED_SERVICES',
                                'ADMIN_PROJECT_DOMAIN'):
        admin_project_domain = region_config.get('SHARED_SERVICES',
                                                 'ADMIN_PROJECT_DOMAIN')
    else:
        admin_project_domain = DEFAULT_DOMAIN_NAME

    attempts = 0
    token = None
    # Wait for connectivity to region one. It can take some time, especially if
    # we have LAG on the management network.
    while not token:
        token = rutils.get_token(auth_url, auth_project, auth_user,
                                 auth_password, admin_user_domain,
                                 admin_project_domain)
        if not token:
            attempts += 1
            if attempts < 10:
                print("\rRetrieving keystone token...{}".format(
                    '.' * attempts), end=' ')
                sys.stdout.flush()
                time.sleep(10)
            else:
                raise ConfigFail(
                    "Unable to obtain keystone token. Please ensure "
                    "networking and keystone configuration is correct.")
    print("DONE")

    # Get services, endpoints, users and domains from keystone
    print("Retrieving services, endpoints and users from keystone... ",
          end=' ')
    region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
    service_name = region_config.get('SHARED_SERVICES',
                                     'KEYSTONE_SERVICE_NAME')
    service_type = region_config.get('SHARED_SERVICES',
                                     'KEYSTONE_SERVICE_TYPE')

    # The catalog may advertise a v2.0 URL; the v3 API is required below.
    api_url = token.get_service_url(
        region_name, service_name, service_type, "admin").replace(
        'v2.0', 'v3')

    services = rutils.get_services(token, api_url)
    endpoints = rutils.get_endpoints(token, api_url)
    users = rutils.get_users(token, api_url)
    domains = rutils.get_domains(token, api_url)
    if not services or not endpoints or not users:
        raise ConfigFail(
            "Unable to retrieve services, endpoints or users from keystone. "
            "Please ensure networking and keystone configuration is correct.")
    print("DONE")

    user_config = None
    if config_type == SUBCLOUD_CONFIG:
        # Retrieve subcloud configuration from dcmanager
        print("Retrieving configuration from dcmanager... ", end=' ')
        dcmanager_url = token.get_service_url(
            'SystemController', 'dcmanager', 'dcmanager', "admin")
        subcloud_name = region_config.get('REGION_2_SERVICES',
                                          'REGION_NAME')
        subcloud_management_subnet = region_config.get('MGMT_NETWORK',
                                                       'CIDR')
        # hash_string authenticates this subcloud to dcmanager.
        hash_string = subcloud_name + subcloud_management_subnet
        subcloud_config = rutils.get_subcloud_config(token, dcmanager_url,
                                                     subcloud_name,
                                                     hash_string)
        user_config = subcloud_config['users']
        print("DONE")

    try:
        # Configure missing region one keystone entries
        create = True
        # Prepare region configuration for puppet to create keystone identities
        if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and
                region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
            print("Preparing keystone configuration... ", end=' ')
        # If keystone configuration for this region already in place,
        # validate it only
        else:
            # Validate region one keystone config
            create = False
            print("Validating keystone configuration... ", end=' ')

        validate_region_one_keystone_config(region_config, token, api_url,
                                            users, services, endpoints, create,
                                            config_type=config_type,
                                            user_config=user_config)
        print("DONE")

        # validate ldap if it is shared
        if region_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'):
            print("Validating ldap configuration... ", end=' ')
            validate_region_one_ldap_config(region_config)
            print("DONE")

        # Create cgcs_config file
        print("Creating config apply file... ", end=' ')
        try:
            create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config,
                                    services, endpoints, domains,
                                    config_type=config_type)
        except configparser.Error as e:
            raise ConfigFail("Error parsing configuration file %s: %s" %
                             (config_file, e))
        print("DONE")

        # Configure controller
        assistant = ConfigAssistant()
        assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False)

    except ConfigFail as e:
        print("A configuration failure has occurred.", end=' ')
        raise e
|
|
||||||
def show_help_region():
    """Print command-line usage for the region configuration command."""
    usage = "Usage: %s [OPTIONS] <CONFIG_FILE>" % sys.argv[0]
    print(usage)
    description = ("Perform region configuration using the region "
                   "configuration from CONFIG_FILE.")
    print(textwrap.fill(description, 80))
    print("--allow-ssh Allow configuration to be executed in "
          "ssh\n")
|
||||||
def show_help_subcloud():
    """Print command-line usage for the subcloud configuration command."""
    usage = "Usage: %s [OPTIONS] <CONFIG_FILE>" % sys.argv[0]
    print(usage)
    description = ("Perform subcloud configuration using the subcloud "
                   "configuration from CONFIG_FILE.")
    print(textwrap.fill(description, 80))
    print("--allow-ssh Allow configuration to be executed in "
          "ssh\n")
|
||||||
def config_main(config_type=REGION_CONFIG):
    """Common entry point for the region_config / subcloud_config commands.

    Parses command-line options (--help, --allow-ssh, and an optional
    config-file path as the final argument), refuses to run over ssh
    unless --allow-ssh is given, runs configure_region(), and always
    removes the temporary cgcs config file on the way out.

    Args:
        config_type: REGION_CONFIG or SUBCLOUD_CONFIG; selects the help
            text and the default config file path.

    Raises:
        ConfigFail: if config_type is not one of the two known values.
    """
    allow_ssh = False
    # Default config file location, overridable by the last CLI argument.
    if config_type == REGION_CONFIG:
        config_file = "/home/sysadmin/region_config"
    elif config_type == SUBCLOUD_CONFIG:
        config_file = "/home/sysadmin/subcloud_config"
    else:
        raise ConfigFail("Invalid config_type: %s" % config_type)

    arg = 1
    while arg < len(sys.argv):
        if sys.argv[arg] in ['--help', '-h', '-?']:
            if config_type == REGION_CONFIG:
                show_help_region()
            else:
                show_help_subcloud()
            exit(1)
        elif sys.argv[arg] == "--allow-ssh":
            allow_ssh = True
        elif arg == len(sys.argv) - 1:
            # Only the final argument may be the config file path.
            config_file = sys.argv[arg]
        else:
            print("Invalid option. Use --help for more information.")
            exit(1)
        arg += 1

    log.configure()

    # Check that the command is being run from the console
    if utils.is_ssh_parent():
        if allow_ssh:
            print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
            print('')
        else:
            print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
            exit(1)

    if not os.path.isfile(config_file):
        print("Config file %s does not exist." % config_file)
        exit(1)

    try:
        configure_region(config_file, config_type=config_type)
    except KeyboardInterrupt:
        print("\nAborting configuration")
    except ConfigFail as e:
        LOG.exception(e)
        print("\nConfiguration failed: {}".format(e))
    except Exception as e:
        LOG.exception(e)
        print("\nConfiguration failed: {}".format(e))
    else:
        print("\nConfiguration finished successfully.")
    finally:
        # Never leave the partially-written apply file behind.
        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
            os.remove(TEMP_CGCS_CONFIG_FILE)
|
||||||
def region_main():
    """Console entry point for region configuration."""
    config_main(REGION_CONFIG)
|
||||||
def subcloud_main():
    """Console entry point for subcloud configuration."""
    config_main(SUBCLOUD_CONFIG)
|
@ -1,579 +0,0 @@
|
||||||
#
|
|
||||||
# Copyright (c) 2014-2018 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
System Inventory Interactions
|
|
||||||
"""
|
|
||||||
|
|
||||||
import json
|
|
||||||
import openstack
|
|
||||||
|
|
||||||
from six.moves.urllib import request as urlrequest
|
|
||||||
from six.moves.urllib.error import URLError
|
|
||||||
from six.moves.urllib.error import HTTPError
|
|
||||||
|
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common.exceptions import KeystoneFail
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
API_VERSION = 1

# Host Personality Constants
HOST_PERSONALITY_NOT_SET = ""
HOST_PERSONALITY_UNKNOWN = "unknown"
HOST_PERSONALITY_CONTROLLER = "controller"
HOST_PERSONALITY_WORKER = "worker"
HOST_PERSONALITY_STORAGE = "storage"

# Host Administrative State Constants
HOST_ADMIN_STATE_NOT_SET = ""
HOST_ADMIN_STATE_UNKNOWN = "unknown"
HOST_ADMIN_STATE_LOCKED = "locked"
HOST_ADMIN_STATE_UNLOCKED = "unlocked"

# Host Operational State Constants
HOST_OPERATIONAL_STATE_NOT_SET = ""
HOST_OPERATIONAL_STATE_UNKNOWN = "unknown"
HOST_OPERATIONAL_STATE_ENABLED = "enabled"
HOST_OPERATIONAL_STATE_DISABLED = "disabled"

# Host Availability State Constants
HOST_AVAIL_STATE_NOT_SET = ""
HOST_AVAIL_STATE_UNKNOWN = "unknown"
HOST_AVAIL_STATE_AVAILABLE = "available"
HOST_AVAIL_STATE_ONLINE = "online"
HOST_AVAIL_STATE_OFFLINE = "offline"
HOST_AVAIL_STATE_POWERED_OFF = "powered-off"
HOST_AVAIL_STATE_POWERED_ON = "powered-on"

# Host Board Management Constants
HOST_BM_TYPE_NOT_SET = ""
HOST_BM_TYPE_UNKNOWN = "unknown"
HOST_BM_TYPE_ILO3 = 'ilo3'
HOST_BM_TYPE_ILO4 = 'ilo4'

# Host invprovision state
HOST_PROVISIONING = "provisioning"
HOST_PROVISIONED = "provisioned"


class Host(object):
    """Client-side view of a single system inventory (sysinv) host.

    Caches the state fields reported by the sysinv REST API and provides
    predicates over that state plus REST actions (lock/unlock and
    power on/off) against the host.
    """

    def __init__(self, hostname, host_data=None):
        """Initialize the host, optionally populating state.

        Args:
            hostname: name of the host (e.g. "controller-0").
            host_data: optional dict as returned by the sysinv /ihosts
                API; when given, the state fields are filled from it.
        """
        self.name = hostname
        self.personality = HOST_PERSONALITY_NOT_SET
        self.admin_state = HOST_ADMIN_STATE_NOT_SET
        self.operational_state = HOST_OPERATIONAL_STATE_NOT_SET
        self.avail_status = []
        self.bm_type = HOST_BM_TYPE_NOT_SET
        self.uuid = None
        self.config_status = None
        self.invprovision = None
        self.boot_device = None
        self.rootfs_device = None
        self.console = None
        self.tboot = None

        if host_data is not None:
            self.__host_set_state__(host_data)

    def __host_set_state__(self, host_data):
        """Refresh the cached state fields from a sysinv host dict.

        A None *host_data* (e.g. a failed REST query) marks the state as
        unknown and leaves the remaining fields untouched.
        """
        if host_data is None:
            self.admin_state = HOST_ADMIN_STATE_UNKNOWN
            self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN
            self.avail_status = []
            self.bm_type = HOST_BM_TYPE_NOT_SET
            # Bug fix: execution previously fell through and subscripted
            # the None host_data below, raising TypeError.
            return

        # Set personality
        if host_data['personality'] == "controller":
            self.personality = HOST_PERSONALITY_CONTROLLER
        elif host_data['personality'] == "worker":
            self.personality = HOST_PERSONALITY_WORKER
        elif host_data['personality'] == "storage":
            self.personality = HOST_PERSONALITY_STORAGE
        else:
            self.personality = HOST_PERSONALITY_UNKNOWN

        # Set administrative state
        if host_data['administrative'] == "locked":
            self.admin_state = HOST_ADMIN_STATE_LOCKED
        elif host_data['administrative'] == "unlocked":
            self.admin_state = HOST_ADMIN_STATE_UNLOCKED
        else:
            self.admin_state = HOST_ADMIN_STATE_UNKNOWN

        # Set operational state
        if host_data['operational'] == "enabled":
            self.operational_state = HOST_OPERATIONAL_STATE_ENABLED
        elif host_data['operational'] == "disabled":
            self.operational_state = HOST_OPERATIONAL_STATE_DISABLED
        else:
            self.operational_state = HOST_OPERATIONAL_STATE_UNKNOWN

        # Set availability status (cleared in place so external
        # references to the list remain valid)
        self.avail_status[:] = []
        if host_data['availability'] == "available":
            self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE)
        elif host_data['availability'] == "online":
            self.avail_status.append(HOST_AVAIL_STATE_ONLINE)
        elif host_data['availability'] == "offline":
            self.avail_status.append(HOST_AVAIL_STATE_OFFLINE)
        elif host_data['availability'] == "power-on":
            self.avail_status.append(HOST_AVAIL_STATE_POWERED_ON)
        elif host_data['availability'] == "power-off":
            self.avail_status.append(HOST_AVAIL_STATE_POWERED_OFF)
        else:
            # Unrecognized availability values are treated as available,
            # matching the original behavior.
            self.avail_status.append(HOST_AVAIL_STATE_AVAILABLE)

        # Set board management type
        if host_data['bm_type'] is None:
            self.bm_type = HOST_BM_TYPE_NOT_SET
        elif host_data['bm_type'] == 'ilo3':
            self.bm_type = HOST_BM_TYPE_ILO3
        elif host_data['bm_type'] == 'ilo4':
            self.bm_type = HOST_BM_TYPE_ILO4
        else:
            self.bm_type = HOST_BM_TYPE_UNKNOWN

        if host_data['invprovision'] == 'provisioned':
            self.invprovision = HOST_PROVISIONED
        else:
            self.invprovision = HOST_PROVISIONING

        self.uuid = host_data['uuid']
        self.config_status = host_data['config_status']
        self.boot_device = host_data['boot_device']
        self.rootfs_device = host_data['rootfs_device']
        self.console = host_data['console']
        self.tboot = host_data['tboot']

    def __host_update__(self, admin_token, region_name):
        """GET this host's record from sysinv; return None on failure."""
        try:
            url = admin_token.get_service_admin_url("platform", "sysinv",
                                                    region_name)
            url += "/ihosts/" + self.name

            request_info = urlrequest.Request(url)
            request_info.add_header("X-Auth-Token", admin_token.get_id())
            request_info.add_header("Accept", "application/json")

            request = urlrequest.urlopen(request_info)
            response = json.loads(request.read())
            request.close()
            return response

        except KeystoneFail as e:
            LOG.error("Keystone authentication failed:{} ".format(e))
            return None

        except HTTPError as e:
            LOG.error("%s, %s" % (e.code, e.read()))
            if e.code == 401:
                # Token rejected: force a refresh on the next request.
                admin_token.set_expired()
            return None

        except URLError as e:
            LOG.error(e)
            return None

    def __host_action__(self, admin_token, action, region_name):
        """PATCH a JSON action document to this host.

        Args:
            action: JSON string, e.g. the output of json.dumps([...]).

        Returns:
            True on success, False on any REST/auth failure.
        """
        try:
            url = admin_token.get_service_admin_url("platform", "sysinv",
                                                    region_name)
            url += "/ihosts/" + self.name

            request_info = urlrequest.Request(url)
            request_info.get_method = lambda: 'PATCH'
            request_info.add_header("X-Auth-Token", admin_token.get_id())
            request_info.add_header("Content-type", "application/json")
            request_info.add_header("Accept", "application/json")
            # Bug fix: Request.add_data() was removed in Python 3.4;
            # assigning the data attribute works on Python 2 and 3.
            request_info.data = action.encode()

            request = urlrequest.urlopen(request_info)
            request.close()
            return True

        except KeystoneFail as e:
            LOG.error("Keystone authentication failed:{} ".format(e))
            return False

        except HTTPError as e:
            LOG.error("%s, %s" % (e.code, e.read()))
            if e.code == 401:
                admin_token.set_expired()
            return False

        except URLError as e:
            LOG.error(e)
            return False

    def is_unlocked(self):
        """True when the host is administratively unlocked."""
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED)

    def is_locked(self):
        """True when the host is not administratively unlocked."""
        return(not self.is_unlocked())

    def is_enabled(self):
        """True when the host is unlocked and operationally enabled."""
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and
               self.operational_state == HOST_OPERATIONAL_STATE_ENABLED)

    def is_controller_enabled_provisioned(self):
        """True for an unlocked/enabled, fully provisioned controller."""
        return(self.admin_state == HOST_ADMIN_STATE_UNLOCKED and
               self.operational_state == HOST_OPERATIONAL_STATE_ENABLED and
               self.personality == HOST_PERSONALITY_CONTROLLER and
               self.invprovision == HOST_PROVISIONED)

    def is_disabled(self):
        """True when the host is not enabled."""
        return(not self.is_enabled())

    def support_power_off(self):
        """True when a board management controller is configured."""
        return(HOST_BM_TYPE_NOT_SET != self.bm_type)

    def is_powered_off(self):
        """True when the host is locked, disabled and reported powered-off."""
        for status in self.avail_status:
            if status == HOST_AVAIL_STATE_POWERED_OFF:
                return(self.admin_state == HOST_ADMIN_STATE_LOCKED and
                       self.operational_state ==
                       HOST_OPERATIONAL_STATE_DISABLED)
        return False

    def is_powered_on(self):
        """True when the host is not powered off."""
        return not self.is_powered_off()

    def refresh_data(self, admin_token, region_name):
        """ Ask the System Inventory for an update view of the host """
        host_data = self.__host_update__(admin_token, region_name)
        self.__host_set_state__(host_data)

    def lock(self, admin_token, region_name):
        """ Asks the Platform to perform a lock against a host """
        if self.is_unlocked():
            action = json.dumps([{"path": "/action",
                                  "value": "lock", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def force_lock(self, admin_token, region_name):
        """ Asks the Platform to perform a force lock against a host """
        if self.is_unlocked():
            action = json.dumps([{"path": "/action",
                                  "value": "force-lock", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def unlock(self, admin_token, region_name):
        """ Asks the Platform to perform an unlock against a host """
        if self.is_locked():
            action = json.dumps([{"path": "/action",
                                  "value": "unlock", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def power_off(self, admin_token, region_name):
        """ Asks the Platform to perform a power-off against a host """
        if self.is_powered_on():
            action = json.dumps([{"path": "/action",
                                  "value": "power-off", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True

    def power_on(self, admin_token, region_name):
        """ Asks the Platform to perform a power-on against a host """
        if self.is_powered_off():
            action = json.dumps([{"path": "/action",
                                  "value": "power-on", "op": "replace"}])

            return self.__host_action__(admin_token, action, region_name)

        return True
|
||||||
def get_hosts(admin_token, region_name, personality=None,
              exclude_hostnames=None):
    """Ask System Inventory for the list of hosts.

    Args:
        admin_token: authenticated keystone token object.
        region_name: region whose sysinv endpoint is queried.
        personality: optional HOST_PERSONALITY_* filter; None keeps all.
        exclude_hostnames: optional list of hostnames to drop.

    Returns:
        list of Host objects; empty list on any REST/auth failure.
    """
    if exclude_hostnames is None:
        exclude_hostnames = []

    try:
        url = admin_token.get_service_admin_url("platform", "sysinv",
                                                region_name)
        url += "/ihosts/"

        req = urlrequest.Request(url)
        req.add_header("X-Auth-Token", admin_token.get_id())
        req.add_header("Accept", "application/json")

        resp = urlrequest.urlopen(req)
        body = json.loads(resp.read())
        resp.close()

        # Translate the requested personality filter into the raw
        # personality string used by the sysinv REST API.
        wanted = {
            HOST_PERSONALITY_CONTROLLER: "controller",
            HOST_PERSONALITY_WORKER: "worker",
            HOST_PERSONALITY_STORAGE: "storage",
        }.get(personality)

        selected = []
        for entry in body['ihosts']:
            if entry['hostname'] in exclude_hostnames:
                continue
            if personality is None or entry['personality'] == wanted:
                selected.append(Host(entry['hostname'], entry))
        return selected

    except KeystoneFail as e:
        LOG.error("Keystone authentication failed:{} ".format(e))
        return []

    except HTTPError as e:
        LOG.error("%s, %s" % (e.code, e.read()))
        if e.code == 401:
            # Token rejected: force a refresh on the next request.
            admin_token.set_expired()
        return []

    except URLError as e:
        LOG.error(e)
        return []
|
||||||
def dict_to_patch(values, install_action=False):
    """Convert a dict of attribute updates into a sysinv JSON-patch list.

    Args:
        values: mapping of attribute name -> new value.
        install_action: when True an 'action': 'install' entry is added.

    Returns:
        list of {'op': 'replace', 'path': '/<key>', 'value': <value>}.
    """
    # Work on a copy so the caller's dict is not mutated as a side
    # effect (the original implementation injected the 'action' key
    # directly into *values*).
    updates = dict(values)
    if install_action:
        # install default action
        updates['action'] = 'install'
    return [{'op': 'replace', 'path': '/' + key, 'value': value}
            for key, value in updates.items()]
|
|
||||||
def get_shared_services():
    """Return the "shared_services" capability of the first system record.

    Returns an empty string when no system record exists; logs and
    re-raises any client failure.
    """
    shared = ""
    try:
        with openstack.OpenStack() as client:
            system_records = client.sysinv.isystem.list()
            if system_records:
                capabilities = system_records[0].capabilities
                shared = capabilities.get("shared_services", "")
    except Exception as e:
        LOG.exception("failed to get shared services")
        raise e

    return shared
|
|
||||||
def get_alarms():
    """Return every active alarm from the sysinv API.

    Logs and re-raises any client failure.
    """
    try:
        with openstack.OpenStack() as client:
            return client.sysinv.ialarm.list()
    except Exception as e:
        LOG.exception("failed to get alarms")
        raise e
|
|
||||||
def controller_enabled_provisioned(hostname):
    """Return True when *hostname* is an unlocked, enabled and
    provisioned controller according to system inventory."""
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if hostname != host.name:
                    continue
                if host.is_controller_enabled_provisioned():
                    LOG.info("host %s is enabled/provisioned" % host.name)
                    return True
    except Exception as e:
        LOG.exception("failed to check if host is enabled/provisioned")
        raise e
    return False
|
|
||||||
def get_system_uuid():
    """Return the UUID of the system record ("" when none exists)."""
    system_uuid = ""
    try:
        with openstack.OpenStack() as client:
            system_records = client.sysinv.isystem.list()
            if system_records:
                system_uuid = system_records[0].uuid
    except Exception as e:
        LOG.exception("failed to get system uuid")
        raise e
    return system_uuid
|
|
||||||
def get_oam_ip():
    """Return the first OAM address record, or None when absent."""
    try:
        with openstack.OpenStack() as client:
            oam_records = client.sysinv.iextoam.list()
            if oam_records:
                return oam_records[0]
    except Exception as e:
        LOG.exception("failed to get OAM IP")
        raise e
    return None
|
|
||||||
def get_mac_addresses(hostname):
    """Return a dict of ethernet port name -> MAC address for *hostname*.

    An empty dict is returned when the host is not found.
    """
    mac_by_port = {}
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if host.name == hostname:
                    ports = client.sysinv.ethernet_port.list(host.uuid)
                    mac_by_port = {port.name: port.mac for port in ports}
    except Exception as e:
        LOG.exception("failed to get MAC addresses")
        raise e
    return mac_by_port
|
|
||||||
def get_disk_serial_ids(hostname):
    """Return a dict of disk device node -> serial id for *hostname*.

    An empty dict is returned when the host is not found.
    """
    serial_by_node = {}
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if host.name == hostname:
                    disks = client.sysinv.idisk.list(host.uuid)
                    serial_by_node = {
                        disk.device_node: disk.serial_id for disk in disks}
    except Exception as e:
        LOG.exception("failed to get disks")
        raise e
    return serial_by_node
|
|
||||||
def update_clone_system(descr, hostname):
    """Update system parameters after a clone installation.

    Renames the system record and clears the cloned host's location and
    serial id. Returns True on success, False when no system record
    exists; logs and re-raises any client failure.
    """
    try:
        with openstack.OpenStack() as client:
            system_records = client.sysinv.isystem.list()
            if not system_records:
                return False
            system = system_records[0]
            system_patch = dict_to_patch({
                'name': "Cloned_system",
                'description': descr
            })
            LOG.info("Updating system: {} [{}]".format(system.name,
                                                       system_patch))
            client.sysinv.isystem.update(system.uuid, system_patch)

            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if host.name != hostname:
                    continue
                # The clone must not inherit the donor's identity fields.
                host_patch = dict_to_patch({
                    'location': {},
                    'serialid': ""
                })
                client.sysinv.ihost.update(host.uuid, host_patch)
                LOG.info("Updating host: {} [{}]".format(host, host_patch))
    except Exception as e:
        LOG.exception("failed to update system parameters")
        raise e
    return True
|
|
||||||
def get_config_status(hostname):
    """Return the config_status of *hostname* (None when not found)."""
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if host.name == hostname:
                    return host.config_status
    except Exception as e:
        LOG.exception("failed to get config status")
        raise e
    return None
|
|
||||||
def get_host_data(hostname):
    """Return the Host object for *hostname* (None when not found)."""
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if host.name == hostname:
                    return host
    except Exception as e:
        LOG.exception("failed to get host data")
        raise e
    return None
|
|
||||||
def do_worker_config_complete(hostname):
    """Trigger application of the worker manifests on *hostname*.

    Sends a 'subfunction_config' action to the host through sysinv;
    logs and re-raises any client failure.
    """
    try:
        with openstack.OpenStack() as client:
            region = client.conf['region_name']
            for host in get_hosts(client.admin_token, region):
                if host.name == hostname:
                    # Create/apply worker manifests
                    patch = dict_to_patch({'action': "subfunction_config"})
                    LOG.info("Applying worker manifests: {} [{}]"
                             .format(host, patch))
                    client.sysinv.ihost.update(host.uuid, patch)
    except Exception as e:
        LOG.exception("worker_config_complete failed")
        raise e
|
|
||||||
def get_storage_backend_services():
|
|
||||||
""" get all storage backends and their assigned services """
|
|
||||||
backend_service_dict = {}
|
|
||||||
try:
|
|
||||||
with openstack.OpenStack() as client:
|
|
||||||
backend_list = client.sysinv.storage_backend.list()
|
|
||||||
for backend in backend_list:
|
|
||||||
backend_service_dict.update(
|
|
||||||
{backend.backend: backend.services})
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception("failed to get storage backend services")
|
|
||||||
raise e
|
|
||||||
|
|
||||||
return backend_service_dict
|
|
|
@ -1,499 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright (c) 2015-2019 Wind River Systems, Inc.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
from six.moves import configparser
|
|
||||||
import os
|
|
||||||
import readline
|
|
||||||
import sys
|
|
||||||
import textwrap
|
|
||||||
|
|
||||||
from controllerconfig.common import constants
|
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common.exceptions import BackupFail
|
|
||||||
from controllerconfig.common.exceptions import RestoreFail
|
|
||||||
from controllerconfig.common.exceptions import UserQuit
|
|
||||||
from controllerconfig.common.exceptions import CloneFail
|
|
||||||
from controllerconfig import lag_mode_to_str
|
|
||||||
from controllerconfig import Network
|
|
||||||
from controllerconfig import validate
|
|
||||||
from controllerconfig import ConfigFail
|
|
||||||
from controllerconfig import DEFAULT_CONFIG
|
|
||||||
from controllerconfig import REGION_CONFIG
|
|
||||||
from controllerconfig import SUBCLOUD_CONFIG
|
|
||||||
from controllerconfig import MGMT_TYPE
|
|
||||||
from controllerconfig import HP_NAMES
|
|
||||||
from controllerconfig import DEFAULT_NAMES
|
|
||||||
from controllerconfig.configassistant import ConfigAssistant
|
|
||||||
from controllerconfig import backup_restore
|
|
||||||
from controllerconfig import utils
|
|
||||||
from controllerconfig import clone
|
|
||||||
|
|
||||||
# Temporary file for building cgcs_config
|
|
||||||
TEMP_CGCS_CONFIG_FILE = "/tmp/cgcs_config"
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_system_config(config_file):
|
|
||||||
"""Parse system config file"""
|
|
||||||
system_config = configparser.RawConfigParser()
|
|
||||||
try:
|
|
||||||
system_config.read(config_file)
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception(e)
|
|
||||||
raise ConfigFail("Error parsing system config file")
|
|
||||||
|
|
||||||
# Dump configuration for debugging
|
|
||||||
# for section in config.sections():
|
|
||||||
# print "Section: %s" % section
|
|
||||||
# for (name, value) in config.items(section):
|
|
||||||
# print "name: %s, value: %s" % (name, value)
|
|
||||||
return system_config
|
|
||||||
|
|
||||||
|
|
||||||
def configure_management_interface(region_config, config_type=REGION_CONFIG):
|
|
||||||
"""Bring up management interface
|
|
||||||
"""
|
|
||||||
mgmt_network = Network()
|
|
||||||
if region_config.has_section('CLM_NETWORK'):
|
|
||||||
naming_type = HP_NAMES
|
|
||||||
else:
|
|
||||||
naming_type = DEFAULT_NAMES
|
|
||||||
|
|
||||||
if config_type == SUBCLOUD_CONFIG:
|
|
||||||
min_addresses = 5
|
|
||||||
else:
|
|
||||||
min_addresses = 8
|
|
||||||
try:
|
|
||||||
mgmt_network.parse_config(region_config, config_type, MGMT_TYPE,
|
|
||||||
min_addresses=min_addresses,
|
|
||||||
naming_type=naming_type)
|
|
||||||
except ConfigFail:
|
|
||||||
raise
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception("Error parsing configuration file")
|
|
||||||
raise ConfigFail("Error parsing configuration file: %s" % e)
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Remove interface config files currently installed
|
|
||||||
utils.remove_interface_config_files()
|
|
||||||
|
|
||||||
# Create the management interface configuration files.
|
|
||||||
# Code based on ConfigAssistant._write_interface_config_management
|
|
||||||
parameters = utils.get_interface_config_static(
|
|
||||||
mgmt_network.start_address,
|
|
||||||
mgmt_network.cidr,
|
|
||||||
mgmt_network.gateway_address)
|
|
||||||
|
|
||||||
if mgmt_network.logical_interface.lag_interface:
|
|
||||||
management_interface = 'bond0'
|
|
||||||
else:
|
|
||||||
management_interface = mgmt_network.logical_interface.ports[0]
|
|
||||||
|
|
||||||
if mgmt_network.vlan:
|
|
||||||
management_interface_name = "%s.%s" % (management_interface,
|
|
||||||
mgmt_network.vlan)
|
|
||||||
utils.write_interface_config_vlan(
|
|
||||||
management_interface_name,
|
|
||||||
mgmt_network.logical_interface.mtu,
|
|
||||||
parameters)
|
|
||||||
|
|
||||||
# underlying interface has no additional parameters
|
|
||||||
parameters = None
|
|
||||||
else:
|
|
||||||
management_interface_name = management_interface
|
|
||||||
|
|
||||||
if mgmt_network.logical_interface.lag_interface:
|
|
||||||
utils.write_interface_config_bond(
|
|
||||||
management_interface,
|
|
||||||
mgmt_network.logical_interface.mtu,
|
|
||||||
lag_mode_to_str(mgmt_network.logical_interface.lag_mode),
|
|
||||||
None,
|
|
||||||
constants.LAG_MIIMON_FREQUENCY,
|
|
||||||
mgmt_network.logical_interface.ports[0],
|
|
||||||
mgmt_network.logical_interface.ports[1],
|
|
||||||
parameters)
|
|
||||||
else:
|
|
||||||
utils.write_interface_config_ethernet(
|
|
||||||
management_interface,
|
|
||||||
mgmt_network.logical_interface.mtu,
|
|
||||||
parameters)
|
|
||||||
|
|
||||||
# Restart networking with the new management interface configuration
|
|
||||||
utils.restart_networking()
|
|
||||||
|
|
||||||
# Send a GARP for floating address. Doing this to help in
|
|
||||||
# cases where we are re-installing in a lab and another node
|
|
||||||
# previously held the floating address.
|
|
||||||
if mgmt_network.cidr.version == 4:
|
|
||||||
utils.send_interface_garp(management_interface_name,
|
|
||||||
mgmt_network.start_address)
|
|
||||||
except Exception:
|
|
||||||
LOG.exception("Failed to configure management interface")
|
|
||||||
raise ConfigFail("Failed to configure management interface")
|
|
||||||
|
|
||||||
|
|
||||||
def create_cgcs_config_file(output_file, system_config,
|
|
||||||
services, endpoints, domains,
|
|
||||||
config_type=REGION_CONFIG, validate_only=False):
|
|
||||||
"""
|
|
||||||
Create cgcs_config file or just perform validation of the system_config if
|
|
||||||
validate_only=True.
|
|
||||||
:param output_file: filename of output cgcs_config file
|
|
||||||
:param system_config: system configuration
|
|
||||||
:param services: keystone services (not used if validate_only)
|
|
||||||
:param endpoints: keystone endpoints (not used if validate_only)
|
|
||||||
:param domains: keystone domains (not used if validate_only)
|
|
||||||
:param config_type: specify region, subcloud or standard config
|
|
||||||
:param validate_only: used to validate the input system_config
|
|
||||||
:return:
|
|
||||||
"""
|
|
||||||
cgcs_config = None
|
|
||||||
if not validate_only:
|
|
||||||
cgcs_config = configparser.RawConfigParser()
|
|
||||||
cgcs_config.optionxform = str
|
|
||||||
|
|
||||||
# general error checking, if not validate_only cgcs config data is returned
|
|
||||||
validate(system_config, config_type, cgcs_config)
|
|
||||||
|
|
||||||
# Region configuration: services, endpoints and domain
|
|
||||||
if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG] and not validate_only:
|
|
||||||
# The services and endpoints are not available in the validation phase
|
|
||||||
region_1_name = system_config.get('SHARED_SERVICES', 'REGION_NAME')
|
|
||||||
keystone_service_name = system_config.get('SHARED_SERVICES',
|
|
||||||
'KEYSTONE_SERVICE_NAME')
|
|
||||||
keystone_service_type = system_config.get('SHARED_SERVICES',
|
|
||||||
'KEYSTONE_SERVICE_TYPE')
|
|
||||||
keystone_service_id = services.get_service_id(keystone_service_name,
|
|
||||||
keystone_service_type)
|
|
||||||
keystone_admin_url = endpoints.get_service_url(region_1_name,
|
|
||||||
keystone_service_id,
|
|
||||||
"admin")
|
|
||||||
keystone_internal_url = endpoints.get_service_url(region_1_name,
|
|
||||||
keystone_service_id,
|
|
||||||
"internal")
|
|
||||||
keystone_public_url = endpoints.get_service_url(region_1_name,
|
|
||||||
keystone_service_id,
|
|
||||||
"public")
|
|
||||||
|
|
||||||
cgcs_config.set('cREGION', 'KEYSTONE_AUTH_URI', keystone_internal_url)
|
|
||||||
cgcs_config.set('cREGION', 'KEYSTONE_IDENTITY_URI', keystone_admin_url)
|
|
||||||
cgcs_config.set('cREGION', 'KEYSTONE_ADMIN_URI', keystone_admin_url)
|
|
||||||
cgcs_config.set('cREGION', 'KEYSTONE_INTERNAL_URI',
|
|
||||||
keystone_internal_url)
|
|
||||||
cgcs_config.set('cREGION', 'KEYSTONE_PUBLIC_URI', keystone_public_url)
|
|
||||||
|
|
||||||
# if ldap is a shared service
|
|
||||||
if (system_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL')):
|
|
||||||
ldap_service_url = system_config.get('SHARED_SERVICES',
|
|
||||||
'LDAP_SERVICE_URL')
|
|
||||||
cgcs_config.set('cREGION', 'LDAP_SERVICE_URI', ldap_service_url)
|
|
||||||
cgcs_config.set('cREGION', 'LDAP_SERVICE_NAME', 'open-ldap')
|
|
||||||
cgcs_config.set('cREGION', 'LDAP_REGION_NAME', region_1_name)
|
|
||||||
|
|
||||||
# If primary region is non-TiC and keystone entries already created,
|
|
||||||
# the flag will tell puppet not to create them.
|
|
||||||
if (system_config.has_option('REGION_2_SERVICES', 'CREATE') and
|
|
||||||
system_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
|
|
||||||
cgcs_config.set('cREGION', 'REGION_SERVICES_CREATE', 'True')
|
|
||||||
|
|
||||||
# System Timezone configuration
|
|
||||||
if system_config.has_option('SYSTEM', 'TIMEZONE'):
|
|
||||||
timezone = system_config.get('SYSTEM', 'TIMEZONE')
|
|
||||||
if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
|
|
||||||
raise ConfigFail(
|
|
||||||
"Timezone file %s does not exist" % timezone)
|
|
||||||
|
|
||||||
# Dump results for debugging
|
|
||||||
# for section in cgcs_config.sections():
|
|
||||||
# print "[%s]" % section
|
|
||||||
# for (name, value) in cgcs_config.items(section):
|
|
||||||
# print "%s=%s" % (name, value)
|
|
||||||
|
|
||||||
if not validate_only:
|
|
||||||
# Write config file
|
|
||||||
with open(output_file, 'w') as config_file:
|
|
||||||
cgcs_config.write(config_file)
|
|
||||||
|
|
||||||
|
|
||||||
def configure_system(config_file):
|
|
||||||
"""Configure the system"""
|
|
||||||
|
|
||||||
# Parse the system config file
|
|
||||||
print("Parsing system configuration file... ", end=' ')
|
|
||||||
system_config = parse_system_config(config_file)
|
|
||||||
print("DONE")
|
|
||||||
|
|
||||||
# Validate the system config file
|
|
||||||
print("Validating system configuration file... ", end=' ')
|
|
||||||
try:
|
|
||||||
create_cgcs_config_file(None, system_config, None, None, None,
|
|
||||||
DEFAULT_CONFIG, validate_only=True)
|
|
||||||
except configparser.Error as e:
|
|
||||||
raise ConfigFail("Error parsing configuration file %s: %s" %
|
|
||||||
(config_file, e))
|
|
||||||
print("DONE")
|
|
||||||
|
|
||||||
# Create cgcs_config file
|
|
||||||
print("Creating config apply file... ", end=' ')
|
|
||||||
try:
|
|
||||||
create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config,
|
|
||||||
None, None, None, DEFAULT_CONFIG)
|
|
||||||
except configparser.Error as e:
|
|
||||||
raise ConfigFail("Error parsing configuration file %s: %s" %
|
|
||||||
(config_file, e))
|
|
||||||
print("DONE")
|
|
||||||
|
|
||||||
|
|
||||||
def show_help():
|
|
||||||
print("Usage: %s\n"
|
|
||||||
"--backup <name> Backup configuration using the given "
|
|
||||||
"name\n"
|
|
||||||
"--clone-iso <name> Clone and create an image with "
|
|
||||||
"the given file name\n"
|
|
||||||
"--clone-status Status of the last installation of "
|
|
||||||
"cloned image\n"
|
|
||||||
"--restore-system "
|
|
||||||
"<include-storage-reinstall | exclude-storage-reinstall> "
|
|
||||||
"<name>\n"
|
|
||||||
" Restore system configuration from backup "
|
|
||||||
"file with\n"
|
|
||||||
" the given name, full path required\n"
|
|
||||||
% sys.argv[0])
|
|
||||||
|
|
||||||
|
|
||||||
def show_help_lab_only():
|
|
||||||
print("Usage: %s\n"
|
|
||||||
"Perform initial configuration\n"
|
|
||||||
"\nThe following options are for lab use only:\n"
|
|
||||||
"--answerfile <file> Apply the configuration from the specified "
|
|
||||||
"file without\n"
|
|
||||||
" any validation or user interaction\n"
|
|
||||||
"--default Apply default configuration with no NTP or "
|
|
||||||
"DNS server\n"
|
|
||||||
" configuration (suitable for testing in a "
|
|
||||||
"virtual\n"
|
|
||||||
" environment)\n"
|
|
||||||
"--archive-dir <dir> Directory to store the archive in\n"
|
|
||||||
"--provision Provision initial system data only\n"
|
|
||||||
% sys.argv[0])
|
|
||||||
|
|
||||||
|
|
||||||
def no_complete(text, state):
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
options = {}
|
|
||||||
answerfile = None
|
|
||||||
backup_name = None
|
|
||||||
archive_dir = constants.BACKUPS_PATH
|
|
||||||
do_default_config = False
|
|
||||||
do_backup = False
|
|
||||||
do_system_restore = False
|
|
||||||
include_storage_reinstall = False
|
|
||||||
do_clone = False
|
|
||||||
do_non_interactive = False
|
|
||||||
do_provision = False
|
|
||||||
system_config_file = "/home/sysadmin/system_config"
|
|
||||||
allow_ssh = False
|
|
||||||
|
|
||||||
# Disable completion as the default completer shows python commands
|
|
||||||
readline.set_completer(no_complete)
|
|
||||||
|
|
||||||
# remove any previous config fail flag file
|
|
||||||
if os.path.exists(constants.CONFIG_FAIL_FILE) is True:
|
|
||||||
os.remove(constants.CONFIG_FAIL_FILE)
|
|
||||||
|
|
||||||
if os.environ.get('CGCS_LABMODE'):
|
|
||||||
options['labmode'] = True
|
|
||||||
|
|
||||||
arg = 1
|
|
||||||
while arg < len(sys.argv):
|
|
||||||
if sys.argv[arg] == "--answerfile":
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
answerfile = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print("--answerfile option requires a file to be specified")
|
|
||||||
exit(1)
|
|
||||||
elif sys.argv[arg] == "--backup":
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
backup_name = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print("--backup requires the name of the backup")
|
|
||||||
exit(1)
|
|
||||||
do_backup = True
|
|
||||||
elif sys.argv[arg] == "--restore-system":
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
if sys.argv[arg] in ["include-storage-reinstall",
|
|
||||||
"exclude-storage-reinstall"]:
|
|
||||||
if sys.argv[arg] == "include-storage-reinstall":
|
|
||||||
include_storage_reinstall = True
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
backup_name = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print(textwrap.fill(
|
|
||||||
"--restore-system requires the filename "
|
|
||||||
" of the backup", 80))
|
|
||||||
exit(1)
|
|
||||||
else:
|
|
||||||
backup_name = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print(textwrap.fill(
|
|
||||||
"--restore-system requires the filename "
|
|
||||||
"of the backup", 80))
|
|
||||||
exit(1)
|
|
||||||
do_system_restore = True
|
|
||||||
elif sys.argv[arg] == "--archive-dir":
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
archive_dir = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print("--archive-dir requires a directory")
|
|
||||||
exit(1)
|
|
||||||
elif sys.argv[arg] == "--clone-iso":
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
backup_name = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print("--clone-iso requires the name of the image")
|
|
||||||
exit(1)
|
|
||||||
do_clone = True
|
|
||||||
elif sys.argv[arg] == "--clone-status":
|
|
||||||
clone.clone_status()
|
|
||||||
exit(0)
|
|
||||||
elif sys.argv[arg] == "--default":
|
|
||||||
do_default_config = True
|
|
||||||
elif sys.argv[arg] == "--config-file":
|
|
||||||
arg += 1
|
|
||||||
if arg < len(sys.argv):
|
|
||||||
system_config_file = sys.argv[arg]
|
|
||||||
else:
|
|
||||||
print("--config-file requires the filename of the config file")
|
|
||||||
exit(1)
|
|
||||||
do_non_interactive = True
|
|
||||||
elif sys.argv[arg] in ["--help", "-h", "-?"]:
|
|
||||||
show_help()
|
|
||||||
exit(1)
|
|
||||||
elif sys.argv[arg] == "--labhelp":
|
|
||||||
show_help_lab_only()
|
|
||||||
exit(1)
|
|
||||||
elif sys.argv[arg] == "--provision":
|
|
||||||
do_provision = True
|
|
||||||
elif sys.argv[arg] == "--allow-ssh":
|
|
||||||
allow_ssh = True
|
|
||||||
elif sys.argv[arg] == "--kubernetes":
|
|
||||||
# This is a temporary flag for use during development. Once things
|
|
||||||
# are stable, we will remove it and make kubernetes the default.
|
|
||||||
options['kubernetes'] = True
|
|
||||||
else:
|
|
||||||
print("Invalid option. Use --help for more information.")
|
|
||||||
exit(1)
|
|
||||||
arg += 1
|
|
||||||
|
|
||||||
if [do_backup,
|
|
||||||
do_system_restore,
|
|
||||||
do_clone,
|
|
||||||
do_default_config,
|
|
||||||
do_non_interactive].count(True) > 1:
|
|
||||||
print("Invalid combination of options selected")
|
|
||||||
exit(1)
|
|
||||||
|
|
||||||
if answerfile and [do_backup,
|
|
||||||
do_system_restore,
|
|
||||||
do_clone,
|
|
||||||
do_default_config,
|
|
||||||
do_non_interactive].count(True) > 0:
|
|
||||||
print("The --answerfile option cannot be used with the selected "
|
|
||||||
"option")
|
|
||||||
exit(1)
|
|
||||||
|
|
||||||
log.configure()
|
|
||||||
|
|
||||||
if not do_backup and not do_clone:
|
|
||||||
# Check if that the command is being run from the console
|
|
||||||
if utils.is_ssh_parent():
|
|
||||||
if allow_ssh:
|
|
||||||
print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
|
|
||||||
print('')
|
|
||||||
else:
|
|
||||||
print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
|
|
||||||
exit(1)
|
|
||||||
|
|
||||||
# Reduce the printk console log level to avoid noise during configuration
|
|
||||||
printk_levels = ''
|
|
||||||
with open('/proc/sys/kernel/printk', 'r') as f:
|
|
||||||
printk_levels = f.readline()
|
|
||||||
|
|
||||||
temp_printk_levels = '3' + printk_levels[1:]
|
|
||||||
with open('/proc/sys/kernel/printk', 'w') as f:
|
|
||||||
f.write(temp_printk_levels)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if do_backup:
|
|
||||||
backup_restore.backup(backup_name, archive_dir)
|
|
||||||
print("\nBackup complete")
|
|
||||||
elif do_system_restore:
|
|
||||||
backup_restore.restore_system(backup_name,
|
|
||||||
include_storage_reinstall)
|
|
||||||
print("\nSystem restore complete")
|
|
||||||
elif do_clone:
|
|
||||||
clone.clone(backup_name, archive_dir)
|
|
||||||
print("\nCloning complete")
|
|
||||||
elif do_provision:
|
|
||||||
assistant = ConfigAssistant(**options)
|
|
||||||
assistant.provision(answerfile)
|
|
||||||
else:
|
|
||||||
print(textwrap.fill(
|
|
||||||
"Please use bootstrap playbook to configure the "
|
|
||||||
"first controller.", 80))
|
|
||||||
exit(1)
|
|
||||||
|
|
||||||
if do_non_interactive:
|
|
||||||
if not os.path.isfile(system_config_file):
|
|
||||||
raise ConfigFail("Config file %s does not exist." %
|
|
||||||
system_config_file)
|
|
||||||
if (os.path.exists(constants.CGCS_CONFIG_FILE) or
|
|
||||||
os.path.exists(constants.CONFIG_PERMDIR) or
|
|
||||||
os.path.exists(
|
|
||||||
constants.INITIAL_CONFIG_COMPLETE_FILE)):
|
|
||||||
raise ConfigFail("Configuration has already been done "
|
|
||||||
"and cannot be repeated.")
|
|
||||||
configure_system(system_config_file)
|
|
||||||
answerfile = TEMP_CGCS_CONFIG_FILE
|
|
||||||
assistant = ConfigAssistant(**options)
|
|
||||||
assistant.configure(answerfile, do_default_config)
|
|
||||||
print("\nConfiguration was applied\n")
|
|
||||||
print(textwrap.fill(
|
|
||||||
"Please complete any out of service commissioning steps "
|
|
||||||
"with system commands and unlock controller to proceed.", 80))
|
|
||||||
assistant.check_required_interfaces_status()
|
|
||||||
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
print("\nAborting configuration")
|
|
||||||
except BackupFail as e:
|
|
||||||
print("\nBackup failed: {}".format(e))
|
|
||||||
except RestoreFail as e:
|
|
||||||
print("\nRestore failed: {}".format(e))
|
|
||||||
except ConfigFail as e:
|
|
||||||
print("\nConfiguration failed: {}".format(e))
|
|
||||||
except CloneFail as e:
|
|
||||||
print("\nCloning failed: {}".format(e))
|
|
||||||
except UserQuit:
|
|
||||||
print("\nAborted configuration")
|
|
||||||
finally:
|
|
||||||
if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
|
|
||||||
os.remove(TEMP_CGCS_CONFIG_FILE)
|
|
||||||
|
|
||||||
# Restore the printk console log level
|
|
||||||
with open('/proc/sys/kernel/printk', 'w') as f:
|
|
||||||
f.write(printk_levels)
|
|
|
@ -1,78 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_3]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth2
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
VLAN=121
|
|
||||||
IP_START_ADDRESS=192.168.204.102
|
|
||||||
IP_END_ADDRESS=192.168.204.199
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
DYNAMIC_ALLOCATION=N
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
IP_START_ADDRESS=10.10.10.2
|
|
||||||
IP_END_ADDRESS=10.10.10.99
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
[REGION2_PXEBOOT_NETWORK]
|
|
||||||
PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_USER_DOMAIN=admin_domain
|
|
||||||
ADMIN_PROJECT_DOMAIN=admin_domain
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=FULL_TEST
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
USER_DOMAIN_NAME=service_domain
|
|
||||||
PROJECT_DOMAIN_NAME=service_domain
|
|
||||||
|
|
||||||
SYSINV_USER_NAME=sysinvTWO
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patchingTWO
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vimTWO
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtceTWO
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fmTWO
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,78 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
TIMEZONE = UTC
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
PXEBOOT_SUBNET = 192.168.203.0/24
|
|
||||||
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
|
|
||||||
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
|
|
||||||
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
MANAGEMENT_MTU = 1500
|
|
||||||
MANAGEMENT_SUBNET = 192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE = no
|
|
||||||
MANAGEMENT_INTERFACE = eth0
|
|
||||||
MANAGEMENT_VLAN = 121
|
|
||||||
MANAGEMENT_INTERFACE_NAME = eth0.121
|
|
||||||
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS = 192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS = 192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME = controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX = controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION = no
|
|
||||||
MANAGEMENT_START_ADDRESS = 192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS = 192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
EXTERNAL_OAM_MTU = 1500
|
|
||||||
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE = no
|
|
||||||
EXTERNAL_OAM_INTERFACE = eth1
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME = eth1
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
|
|
||||||
|
|
||||||
[cREGION]
|
|
||||||
REGION_CONFIG = True
|
|
||||||
REGION_1_NAME = RegionOne
|
|
||||||
REGION_2_NAME = RegionTwo
|
|
||||||
ADMIN_USER_NAME = admin
|
|
||||||
ADMIN_USER_DOMAIN = admin_domain
|
|
||||||
ADMIN_PROJECT_NAME = admin
|
|
||||||
ADMIN_PROJECT_DOMAIN = admin_domain
|
|
||||||
SERVICE_PROJECT_NAME = FULL_TEST
|
|
||||||
KEYSTONE_SERVICE_NAME = keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE = identity
|
|
||||||
PATCHING_USER_NAME = patchingTWO
|
|
||||||
PATCHING_PASSWORD = password2WO*
|
|
||||||
SYSINV_USER_NAME = sysinvTWO
|
|
||||||
SYSINV_PASSWORD = password2WO*
|
|
||||||
SYSINV_SERVICE_NAME = sysinv
|
|
||||||
SYSINV_SERVICE_TYPE = platform
|
|
||||||
NFV_USER_NAME = vimTWO
|
|
||||||
NFV_PASSWORD = password2WO*
|
|
||||||
MTCE_USER_NAME = mtceTWO
|
|
||||||
MTCE_PASSWORD = password2WO*
|
|
||||||
FM_USER_NAME = fmTWO
|
|
||||||
FM_PASSWORD = password2WO*
|
|
||||||
BARBICAN_USER_NAME = barbican
|
|
||||||
BARBICAN_PASSWORD = barbican2WO*
|
|
||||||
USER_DOMAIN_NAME = service_domain
|
|
||||||
PROJECT_DOMAIN_NAME = service_domain
|
|
||||||
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD = Li69nux*
|
|
||||||
|
|
|
@ -1,77 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[STORAGE]
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_3]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth2
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
VLAN=121
|
|
||||||
IP_START_ADDRESS=192.168.204.102
|
|
||||||
IP_END_ADDRESS=192.168.204.199
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
DYNAMIC_ALLOCATION=N
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
IP_START_ADDRESS=10.10.10.2
|
|
||||||
IP_END_ADDRESS=10.10.10.99
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
[REGION2_PXEBOOT_NETWORK]
|
|
||||||
PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:35357/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=FULL_TEST
|
|
||||||
|
|
||||||
LDAP_SERVICE_URL=ldap://192.168.204.12:389
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
SYSINV_USER_NAME=sysinvTWO
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patchingTWO
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vimTWO
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtceTWO
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fmTWO
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,81 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
TIMEZONE = UTC
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
PXEBOOT_SUBNET = 192.168.203.0/24
|
|
||||||
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
|
|
||||||
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
|
|
||||||
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
MANAGEMENT_MTU = 1500
|
|
||||||
MANAGEMENT_SUBNET = 192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE = no
|
|
||||||
MANAGEMENT_INTERFACE = eth0
|
|
||||||
MANAGEMENT_VLAN = 121
|
|
||||||
MANAGEMENT_INTERFACE_NAME = eth0.121
|
|
||||||
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS = 192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS = 192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME = controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX = controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION = no
|
|
||||||
MANAGEMENT_START_ADDRESS = 192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS = 192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
EXTERNAL_OAM_MTU = 1500
|
|
||||||
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE = no
|
|
||||||
EXTERNAL_OAM_INTERFACE = eth1
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME = eth1
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
|
|
||||||
|
|
||||||
[cREGION]
|
|
||||||
REGION_CONFIG = True
|
|
||||||
REGION_1_NAME = RegionOne
|
|
||||||
REGION_2_NAME = RegionTwo
|
|
||||||
ADMIN_USER_NAME = admin
|
|
||||||
ADMIN_USER_DOMAIN = Default
|
|
||||||
ADMIN_PROJECT_NAME = admin
|
|
||||||
ADMIN_PROJECT_DOMAIN = Default
|
|
||||||
SERVICE_PROJECT_NAME = FULL_TEST
|
|
||||||
KEYSTONE_SERVICE_NAME = keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE = identity
|
|
||||||
PATCHING_USER_NAME = patchingTWO
|
|
||||||
PATCHING_PASSWORD = password2WO*
|
|
||||||
SYSINV_USER_NAME = sysinvTWO
|
|
||||||
SYSINV_PASSWORD = password2WO*
|
|
||||||
SYSINV_SERVICE_NAME = sysinv
|
|
||||||
SYSINV_SERVICE_TYPE = platform
|
|
||||||
NFV_USER_NAME = vimTWO
|
|
||||||
NFV_PASSWORD = password2WO*
|
|
||||||
MTCE_USER_NAME = mtceTWO
|
|
||||||
MTCE_PASSWORD = password2WO*
|
|
||||||
FM_USER_NAME = fmTWO
|
|
||||||
FM_PASSWORD = password2WO*
|
|
||||||
BARBICAN_USER_NAME = barbican
|
|
||||||
BARBICAN_PASSWORD = barbican2WO*
|
|
||||||
USER_DOMAIN_NAME = Default
|
|
||||||
PROJECT_DOMAIN_NAME = Default
|
|
||||||
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
LDAP_SERVICE_URI = ldap://192.168.204.12:389
|
|
||||||
LDAP_SERVICE_NAME = open-ldap
|
|
||||||
LDAP_REGION_NAME = RegionOne
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD = Li69nux*
|
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
# Dummy certificate file
|
|
|
@ -1,62 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
# System Configuration
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
# PXEBoot Network Support Configuration
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
# Management Network Configuration
|
|
||||||
MANAGEMENT_INTERFACE_NAME=eth1
|
|
||||||
MANAGEMENT_INTERFACE=eth1
|
|
||||||
MANAGEMENT_MTU=1500
|
|
||||||
MANAGEMENT_SUBNET=192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE=no
|
|
||||||
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
|
|
||||||
CONTROLLER_0_ADDRESS=192.168.204.3
|
|
||||||
CONTROLLER_1_ADDRESS=192.168.204.4
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1=192.168.204.7
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2=192.168.204.8
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME=controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX=controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION=yes
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
|
|
||||||
|
|
||||||
[cCLUSTER]
|
|
||||||
# Cluster Host Network Configuration
|
|
||||||
CLUSTER_INTERFACE_NAME=eth1
|
|
||||||
CLUSTER_INTERFACE=eth1
|
|
||||||
CLUSTER_VLAN=NC
|
|
||||||
CLUSTER_MTU=1500
|
|
||||||
CLUSTER_SUBNET=192.168.206.0/24
|
|
||||||
LAG_CLUSTER_INTERFACE=no
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
# External OAM Network Configuration
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME=eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE=eth0
|
|
||||||
EXTERNAL_OAM_VLAN=NC
|
|
||||||
EXTERNAL_OAM_MTU=1500
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE=no
|
|
||||||
EXTERNAL_OAM_SUBNET=10.10.10.0/24
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
|
|
||||||
|
|
||||||
[cDNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=8.8.8.8
|
|
||||||
NAMESERVER_2=8.8.4.4
|
|
||||||
NAMESERVER_3=NC
|
|
||||||
|
|
||||||
[cSECURITY]
|
|
||||||
[cREGION]
|
|
||||||
# Region Configuration
|
|
||||||
REGION_CONFIG=False
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
|
@ -1,62 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
# System Configuration
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
# PXEBoot Network Support Configuration
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
# Management Network Configuration
|
|
||||||
MANAGEMENT_INTERFACE_NAME=eth1
|
|
||||||
MANAGEMENT_INTERFACE=eth1
|
|
||||||
MANAGEMENT_MTU=1500
|
|
||||||
MANAGEMENT_SUBNET=192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE=no
|
|
||||||
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
|
|
||||||
CONTROLLER_0_ADDRESS=192.168.204.3
|
|
||||||
CONTROLLER_1_ADDRESS=192.168.204.4
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1=192.168.204.5
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2=192.168.204.6
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME=controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX=controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION=yes
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
|
|
||||||
|
|
||||||
[cCLUSTER]
|
|
||||||
# Cluster Host Network Configuration
|
|
||||||
CLUSTER_INTERFACE_NAME=eth1
|
|
||||||
CLUSTER_INTERFACE=eth1
|
|
||||||
CLUSTER_VLAN=NC
|
|
||||||
CLUSTER_MTU=1500
|
|
||||||
CLUSTER_SUBNET=192.168.206.0/24
|
|
||||||
LAG_CLUSTER_INTERFACE=no
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
# External OAM Network Configuration
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME=eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE=eth0
|
|
||||||
EXTERNAL_OAM_VLAN=NC
|
|
||||||
EXTERNAL_OAM_MTU=1500
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE=no
|
|
||||||
EXTERNAL_OAM_SUBNET=10.10.10.0/24
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
|
|
||||||
|
|
||||||
[cDNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=8.8.8.8
|
|
||||||
NAMESERVER_2=8.8.4.4
|
|
||||||
NAMESERVER_3=NC
|
|
||||||
|
|
||||||
[cSECURITY]
|
|
||||||
[cREGION]
|
|
||||||
# Region Configuration
|
|
||||||
REGION_CONFIG=False
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
|
@ -1,62 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
# System Configuration
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
# PXEBoot Network Support Configuration
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
# Management Network Configuration
|
|
||||||
MANAGEMENT_INTERFACE_NAME=eth1
|
|
||||||
MANAGEMENT_INTERFACE=eth1
|
|
||||||
MANAGEMENT_MTU=1500
|
|
||||||
MANAGEMENT_SUBNET=1234::/64
|
|
||||||
LAG_MANAGEMENT_INTERFACE=no
|
|
||||||
CONTROLLER_FLOATING_ADDRESS=1234::2
|
|
||||||
CONTROLLER_0_ADDRESS=1234::3
|
|
||||||
CONTROLLER_1_ADDRESS=1234::4
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1=1234::5
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2=1234::6
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME=controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX=controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION=yes
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET=ff08::1:1:0/124
|
|
||||||
|
|
||||||
[cCLUSTER]
|
|
||||||
# Cluster Host Network Configuration
|
|
||||||
CLUSTER_INTERFACE_NAME=eth1
|
|
||||||
CLUSTER_INTERFACE=eth1
|
|
||||||
CLUSTER_VLAN=NC
|
|
||||||
CLUSTER_MTU=1500
|
|
||||||
CLUSTER_SUBNET=192.168.206.0/24
|
|
||||||
LAG_CLUSTER_INTERFACE=no
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
# External OAM Network Configuration
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME=eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE=eth0
|
|
||||||
EXTERNAL_OAM_VLAN=NC
|
|
||||||
EXTERNAL_OAM_MTU=1500
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE=no
|
|
||||||
EXTERNAL_OAM_SUBNET=abcd::/64
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS=abcd::1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS=abcd::2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS=abcd::3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS=abcd::4
|
|
||||||
|
|
||||||
[cDNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=8.8.8.8
|
|
||||||
NAMESERVER_2=8.8.4.4
|
|
||||||
NAMESERVER_3=NC
|
|
||||||
|
|
||||||
[cSECURITY]
|
|
||||||
[cREGION]
|
|
||||||
# Region Configuration
|
|
||||||
REGION_CONFIG=False
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
|
@ -1,76 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
# System Configuration
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
# PXEBoot Network Support Configuration
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
# Management Network Configuration
|
|
||||||
MANAGEMENT_INTERFACE_NAME=eth1
|
|
||||||
MANAGEMENT_INTERFACE=eth1
|
|
||||||
MANAGEMENT_MTU=1500
|
|
||||||
MANAGEMENT_SUBNET=192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE=no
|
|
||||||
CONTROLLER_FLOATING_ADDRESS=192.168.204.2
|
|
||||||
CONTROLLER_0_ADDRESS=192.168.204.3
|
|
||||||
CONTROLLER_1_ADDRESS=192.168.204.4
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1=192.168.204.5
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2=192.168.204.6
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME=controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX=controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION=yes
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
|
|
||||||
|
|
||||||
[cCLUSTER]
|
|
||||||
# Cluster Host Network Configuration
|
|
||||||
CLUSTER_INTERFACE_NAME=eth1
|
|
||||||
CLUSTER_INTERFACE=eth1
|
|
||||||
CLUSTER_VLAN=NC
|
|
||||||
CLUSTER_MTU=1500
|
|
||||||
CLUSTER_SUBNET=192.168.206.0/24
|
|
||||||
LAG_CLUSTER_INTERFACE=no
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
# External OAM Network Configuration
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME=eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE=eth0
|
|
||||||
EXTERNAL_OAM_VLAN=NC
|
|
||||||
EXTERNAL_OAM_MTU=1500
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE=no
|
|
||||||
EXTERNAL_OAM_SUBNET=10.10.10.0/24
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
|
|
||||||
|
|
||||||
[cDNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=1.2.3.4
|
|
||||||
NAMESERVER_2=5.6.7.8
|
|
||||||
NAMESERVER_3=NC
|
|
||||||
|
|
||||||
[cDOCKER_PROXY]
|
|
||||||
# Docker Proxy Configuration
|
|
||||||
DOCKER_HTTP_PROXY=http://proxy.com:123
|
|
||||||
DOCKER_HTTPS_PROXY=https://proxy.com:123
|
|
||||||
DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2
|
|
||||||
|
|
||||||
[cDOCKER_REGISTRY]
|
|
||||||
# Docker Registry Configuration
|
|
||||||
DOCKER_K8S_REGISTRY=my.registry.com:5000
|
|
||||||
DOCKER_GCR_REGISTRY=my.registry.com
|
|
||||||
DOCKER_QUAY_REGISTRY=1.2.3.4:5000
|
|
||||||
DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000
|
|
||||||
IS_SECURE_REGISTRY=False
|
|
||||||
|
|
||||||
[cSECURITY]
|
|
||||||
[cREGION]
|
|
||||||
# Region Configuration
|
|
||||||
REGION_CONFIG=False
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
|
@ -1,94 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
# System Configuration
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
# PXEBoot Network Support Configuration
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
# Management Network Configuration
|
|
||||||
MANAGEMENT_INTERFACE_NAME=eth1
|
|
||||||
MANAGEMENT_INTERFACE=eth1
|
|
||||||
MANAGEMENT_MTU=1500
|
|
||||||
MANAGEMENT_SUBNET=192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE=no
|
|
||||||
CONTROLLER_FLOATING_ADDRESS=192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS=192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS=192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1=192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2=192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME=controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX=controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION=yes
|
|
||||||
MANAGEMENT_START_ADDRESS=192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS=192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
|
|
||||||
|
|
||||||
[cCLUSTER]
|
|
||||||
# Cluster Host Network Configuration
|
|
||||||
CLUSTER_INTERFACE_NAME=eth1
|
|
||||||
CLUSTER_INTERFACE=eth1
|
|
||||||
CLUSTER_VLAN=NC
|
|
||||||
CLUSTER_MTU=1500
|
|
||||||
CLUSTER_SUBNET=192.168.206.0/24
|
|
||||||
LAG_CLUSTER_INTERFACE=no
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
# External OAM Network Configuration
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME=eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE=eth0
|
|
||||||
EXTERNAL_OAM_VLAN=NC
|
|
||||||
EXTERNAL_OAM_MTU=1500
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE=no
|
|
||||||
EXTERNAL_OAM_SUBNET=10.10.10.0/24
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
|
|
||||||
|
|
||||||
[cDNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=8.8.8.8
|
|
||||||
NAMESERVER_2=8.8.4.4
|
|
||||||
NAMESERVER_3=NC
|
|
||||||
|
|
||||||
[cSECURITY]
|
|
||||||
[cREGION]
|
|
||||||
# Region Configuration
|
|
||||||
REGION_CONFIG=True
|
|
||||||
REGION_1_NAME=RegionOne
|
|
||||||
REGION_2_NAME=RegionTwo
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_USER_DOMAIN=Default
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_PROJECT_DOMAIN=Default
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
SERVICE_USER_DOMAIN=Default
|
|
||||||
SERVICE_PROJECT_DOMAIN=Default
|
|
||||||
KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
|
@ -1,94 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
# System Configuration
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
# PXEBoot Network Support Configuration
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME=pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
# Management Network Configuration
|
|
||||||
MANAGEMENT_INTERFACE_NAME=eth1
|
|
||||||
MANAGEMENT_INTERFACE=eth1
|
|
||||||
MANAGEMENT_MTU=1500
|
|
||||||
MANAGEMENT_SUBNET=192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE=no
|
|
||||||
CONTROLLER_FLOATING_ADDRESS=192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS=192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS=192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1=192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2=192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME=controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX=controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME=oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION=yes
|
|
||||||
MANAGEMENT_START_ADDRESS=192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS=192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET=239.1.1.0/28
|
|
||||||
|
|
||||||
[cCLUSTER]
|
|
||||||
# Cluster Host Network Configuration
|
|
||||||
CLUSTER_INTERFACE_NAME=eth1
|
|
||||||
CLUSTER_INTERFACE=eth1
|
|
||||||
CLUSTER_VLAN=NC
|
|
||||||
CLUSTER_MTU=1500
|
|
||||||
CLUSTER_SUBNET=192.168.206.0/24
|
|
||||||
LAG_CLUSTER_INTERFACE=no
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
# External OAM Network Configuration
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME=eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE=eth0
|
|
||||||
EXTERNAL_OAM_VLAN=NC
|
|
||||||
EXTERNAL_OAM_MTU=1500
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE=no
|
|
||||||
EXTERNAL_OAM_SUBNET=10.10.10.0/24
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS=10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS=10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS=10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS=10.10.10.4
|
|
||||||
|
|
||||||
[cDNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=8.8.8.8
|
|
||||||
NAMESERVER_2=8.8.4.4
|
|
||||||
NAMESERVER_3=NC
|
|
||||||
|
|
||||||
[cSECURITY]
|
|
||||||
[cREGION]
|
|
||||||
# Region Configuration
|
|
||||||
REGION_CONFIG=True
|
|
||||||
REGION_1_NAME=RegionOne
|
|
||||||
REGION_2_NAME=RegionTwo
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_USER_DOMAIN=Default
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_PROJECT_DOMAIN=Default
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
SERVICE_USER_DOMAIN=Default
|
|
||||||
SERVICE_PROJECT_DOMAIN=Default
|
|
||||||
KEYSTONE_AUTH_URI=http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI=http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI=http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
|
@ -1,72 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
TIMEZONE=UTC
|
|
||||||
|
|
||||||
[STORAGE]
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=Y
|
|
||||||
LAG_MODE=4
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1,eth2
|
|
||||||
|
|
||||||
[CLM_NETWORK]
|
|
||||||
CLM_VLAN=123
|
|
||||||
CLM_IP_START_ADDRESS=192.168.204.102
|
|
||||||
CLM_IP_END_ADDRESS=192.168.204.199
|
|
||||||
CLM_CIDR=192.168.204.0/24
|
|
||||||
CLM_MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
CLM_GATEWAY=192.168.204.12
|
|
||||||
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CAN_NETWORK]
|
|
||||||
CAN_VLAN=125
|
|
||||||
CAN_IP_START_ADDRESS=10.10.10.2
|
|
||||||
CAN_IP_END_ADDRESS=10.10.10.4
|
|
||||||
CAN_CIDR=10.10.10.0/24
|
|
||||||
;CAN_GATEWAY=10.10.10.1
|
|
||||||
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[REGION2_PXEBOOT_NETWORK]
|
|
||||||
PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,82 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
TIMEZONE = UTC
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
PXEBOOT_SUBNET = 192.168.203.0/24
|
|
||||||
CONTROLLER_PXEBOOT_FLOATING_ADDRESS = 192.168.203.2
|
|
||||||
CONTROLLER_PXEBOOT_ADDRESS_0 = 192.168.203.3
|
|
||||||
CONTROLLER_PXEBOOT_ADDRESS_1 = 192.168.203.4
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
MANAGEMENT_MTU = 1500
|
|
||||||
MANAGEMENT_SUBNET = 192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE = yes
|
|
||||||
MANAGEMENT_BOND_MEMBER_0 = eth1
|
|
||||||
MANAGEMENT_BOND_MEMBER_1 = eth2
|
|
||||||
MANAGEMENT_BOND_POLICY = 802.3ad
|
|
||||||
MANAGEMENT_INTERFACE = bond0
|
|
||||||
MANAGEMENT_VLAN = 123
|
|
||||||
MANAGEMENT_INTERFACE_NAME = bond0.123
|
|
||||||
MANAGEMENT_GATEWAY_ADDRESS = 192.168.204.12
|
|
||||||
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS = 192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS = 192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME = controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX = controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION = no
|
|
||||||
MANAGEMENT_START_ADDRESS = 192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS = 192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
EXTERNAL_OAM_MTU = 1500
|
|
||||||
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE = no
|
|
||||||
EXTERNAL_OAM_INTERFACE = bond0
|
|
||||||
EXTERNAL_OAM_VLAN = 125
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME = bond0.125
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
|
|
||||||
|
|
||||||
[cREGION]
|
|
||||||
REGION_CONFIG = True
|
|
||||||
REGION_1_NAME = RegionOne
|
|
||||||
REGION_2_NAME = RegionTwo
|
|
||||||
ADMIN_USER_NAME = admin
|
|
||||||
ADMIN_USER_DOMAIN = Default
|
|
||||||
ADMIN_PROJECT_NAME = admin
|
|
||||||
ADMIN_PROJECT_DOMAIN = Default
|
|
||||||
SERVICE_PROJECT_NAME = service
|
|
||||||
KEYSTONE_SERVICE_NAME = keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE = identity
|
|
||||||
PATCHING_USER_NAME = patching
|
|
||||||
PATCHING_PASSWORD = password2WO*
|
|
||||||
SYSINV_USER_NAME = sysinv
|
|
||||||
SYSINV_PASSWORD = password2WO*
|
|
||||||
SYSINV_SERVICE_NAME = sysinv
|
|
||||||
SYSINV_SERVICE_TYPE = platform
|
|
||||||
NFV_USER_NAME = vim
|
|
||||||
NFV_PASSWORD = password2WO*
|
|
||||||
MTCE_USER_NAME = mtce
|
|
||||||
MTCE_PASSWORD = password2WO*
|
|
||||||
FM_USER_NAME = fm
|
|
||||||
FM_PASSWORD = password2WO*
|
|
||||||
BARBICAN_USER_NAME = barbican
|
|
||||||
BARBICAN_PASSWORD = barbican2WO*
|
|
||||||
USER_DOMAIN_NAME = Default
|
|
||||||
PROJECT_DOMAIN_NAME = Default
|
|
||||||
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD = Li69nux*
|
|
||||||
|
|
|
@ -1,81 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[STORAGE]
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[CLM_NETWORK]
|
|
||||||
;CLM_VLAN=123
|
|
||||||
CLM_IP_START_ADDRESS=192.168.204.102
|
|
||||||
CLM_IP_END_ADDRESS=192.168.204.199
|
|
||||||
CLM_CIDR=192.168.204.0/24
|
|
||||||
CLM_MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;CLM_GATEWAY=192.168.204.12
|
|
||||||
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CAN_NETWORK]
|
|
||||||
;CAN_VLAN=
|
|
||||||
CAN_IP_START_ADDRESS=10.10.10.2
|
|
||||||
CAN_IP_END_ADDRESS=10.10.10.4
|
|
||||||
CAN_CIDR=10.10.10.0/24
|
|
||||||
CAN_GATEWAY=10.10.10.1
|
|
||||||
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[REGION2_PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[NETWORK]
|
|
||||||
VSWITCH_TYPE=nuage_vrs
|
|
||||||
METADATA_PROXY_SHARED_SECRET=NuageNetworksSharedSecret
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,73 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
TIMEZONE = UTC
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
MANAGEMENT_MTU = 1500
|
|
||||||
MANAGEMENT_SUBNET = 192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE = no
|
|
||||||
MANAGEMENT_INTERFACE = eth1
|
|
||||||
MANAGEMENT_INTERFACE_NAME = eth1
|
|
||||||
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS = 192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS = 192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME = controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX = controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION = no
|
|
||||||
MANAGEMENT_START_ADDRESS = 192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS = 192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
EXTERNAL_OAM_MTU = 1500
|
|
||||||
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE = no
|
|
||||||
EXTERNAL_OAM_INTERFACE = eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME = eth0
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
|
|
||||||
|
|
||||||
[cREGION]
|
|
||||||
REGION_CONFIG = True
|
|
||||||
REGION_1_NAME = RegionOne
|
|
||||||
REGION_2_NAME = RegionTwo
|
|
||||||
ADMIN_USER_NAME = admin
|
|
||||||
ADMIN_USER_DOMAIN = Default
|
|
||||||
ADMIN_PROJECT_NAME = admin
|
|
||||||
ADMIN_PROJECT_DOMAIN = Default
|
|
||||||
SERVICE_PROJECT_NAME = service
|
|
||||||
KEYSTONE_SERVICE_NAME = keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE = identity
|
|
||||||
PATCHING_USER_NAME = patching
|
|
||||||
PATCHING_PASSWORD = password2WO*
|
|
||||||
SYSINV_USER_NAME = sysinv
|
|
||||||
SYSINV_PASSWORD = password2WO*
|
|
||||||
SYSINV_SERVICE_NAME = sysinv
|
|
||||||
SYSINV_SERVICE_TYPE = platform
|
|
||||||
NFV_USER_NAME = vim
|
|
||||||
NFV_PASSWORD = password2WO*
|
|
||||||
MTCE_USER_NAME = mtce
|
|
||||||
MTCE_PASSWORD = password2WO*
|
|
||||||
FM_USER_NAME = fm
|
|
||||||
FM_PASSWORD = password2WO*
|
|
||||||
BARBICAN_USER_NAME = barbican
|
|
||||||
BARBICAN_PASSWORD = barbican2WO*
|
|
||||||
USER_DOMAIN_NAME = Default
|
|
||||||
PROJECT_DOMAIN_NAME = Default
|
|
||||||
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD = Li69nux*
|
|
||||||
|
|
|
@ -1,77 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[STORAGE]
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[CLM_NETWORK]
|
|
||||||
;CLM_VLAN=123
|
|
||||||
CLM_IP_START_ADDRESS=192.168.204.102
|
|
||||||
CLM_IP_END_ADDRESS=192.168.204.199
|
|
||||||
CLM_CIDR=192.168.204.0/24
|
|
||||||
CLM_MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;CLM_GATEWAY=192.168.204.12
|
|
||||||
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CAN_NETWORK]
|
|
||||||
;CAN_VLAN=
|
|
||||||
CAN_IP_START_ADDRESS=10.10.10.2
|
|
||||||
CAN_IP_END_ADDRESS=10.10.10.4
|
|
||||||
CAN_CIDR=10.10.10.0/24
|
|
||||||
CAN_GATEWAY=10.10.10.1
|
|
||||||
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[REGION2_PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,73 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
TIMEZONE = UTC
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
MANAGEMENT_MTU = 1500
|
|
||||||
MANAGEMENT_SUBNET = 192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE = no
|
|
||||||
MANAGEMENT_INTERFACE = eth1
|
|
||||||
MANAGEMENT_INTERFACE_NAME = eth1
|
|
||||||
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS = 192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS = 192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME = controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX = controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION = no
|
|
||||||
MANAGEMENT_START_ADDRESS = 192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS = 192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
EXTERNAL_OAM_MTU = 1500
|
|
||||||
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE = no
|
|
||||||
EXTERNAL_OAM_INTERFACE = eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME = eth0
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
|
|
||||||
|
|
||||||
[cREGION]
|
|
||||||
REGION_CONFIG = True
|
|
||||||
REGION_1_NAME = RegionOne
|
|
||||||
REGION_2_NAME = RegionTwo
|
|
||||||
ADMIN_USER_NAME = admin
|
|
||||||
ADMIN_USER_DOMAIN = Default
|
|
||||||
ADMIN_PROJECT_NAME = admin
|
|
||||||
ADMIN_PROJECT_DOMAIN = Default
|
|
||||||
SERVICE_PROJECT_NAME = service
|
|
||||||
KEYSTONE_SERVICE_NAME = keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE = identity
|
|
||||||
PATCHING_USER_NAME = patching
|
|
||||||
PATCHING_PASSWORD = password2WO*
|
|
||||||
SYSINV_USER_NAME = sysinv
|
|
||||||
SYSINV_PASSWORD = password2WO*
|
|
||||||
SYSINV_SERVICE_NAME = sysinv
|
|
||||||
SYSINV_SERVICE_TYPE = platform
|
|
||||||
NFV_USER_NAME = vim
|
|
||||||
NFV_PASSWORD = password2WO*
|
|
||||||
MTCE_USER_NAME = mtce
|
|
||||||
MTCE_PASSWORD = password2WO*
|
|
||||||
FM_USER_NAME = fm
|
|
||||||
FM_PASSWORD = password2WO*
|
|
||||||
BARBICAN_USER_NAME = barbican
|
|
||||||
BARBICAN_PASSWORD = barbican2WO*
|
|
||||||
USER_DOMAIN_NAME = Default
|
|
||||||
PROJECT_DOMAIN_NAME = Default
|
|
||||||
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD = Li69nux*
|
|
||||||
|
|
|
@ -1,77 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[STORAGE]
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[CLM_NETWORK]
|
|
||||||
;CLM_VLAN=123
|
|
||||||
CLM_IP_START_ADDRESS=192.168.204.102
|
|
||||||
CLM_IP_END_ADDRESS=192.168.204.199
|
|
||||||
CLM_CIDR=192.168.204.0/24
|
|
||||||
CLM_MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;CLM_GATEWAY=192.168.204.12
|
|
||||||
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CAN_NETWORK]
|
|
||||||
;CAN_VLAN=
|
|
||||||
CAN_IP_START_ADDRESS=10.10.10.2
|
|
||||||
CAN_IP_END_ADDRESS=10.10.10.4
|
|
||||||
CAN_CIDR=10.10.10.0/24
|
|
||||||
CAN_GATEWAY=10.10.10.1
|
|
||||||
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[REGION2_PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,78 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[STORAGE]
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[CLM_NETWORK]
|
|
||||||
;CLM_VLAN=123
|
|
||||||
CLM_IP_START_ADDRESS=192.168.204.102
|
|
||||||
CLM_IP_END_ADDRESS=192.168.204.199
|
|
||||||
CLM_CIDR=192.168.204.0/24
|
|
||||||
CLM_MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;CLM_GATEWAY=192.168.204.12
|
|
||||||
CLM_LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CAN_NETWORK]
|
|
||||||
;CAN_VLAN=
|
|
||||||
CAN_IP_FLOATING_ADDRESS=10.10.10.2
|
|
||||||
CAN_IP_UNIT_0_ADDRESS=10.10.10.3
|
|
||||||
CAN_IP_UNIT_1_ADDRESS=10.10.10.4
|
|
||||||
CAN_CIDR=10.10.10.0/24
|
|
||||||
CAN_GATEWAY=10.10.10.1
|
|
||||||
CAN_LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[REGION2_PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[SHARED_SERVICES]
|
|
||||||
REGION_NAME=RegionOne
|
|
||||||
ADMIN_PROJECT_NAME=admin
|
|
||||||
ADMIN_USER_NAME=admin
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
KEYSTONE_ADMINURL=http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_SERVICE_NAME=keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE=identity
|
|
||||||
SERVICE_PROJECT_NAME=service
|
|
||||||
|
|
||||||
[REGION_2_SERVICES]
|
|
||||||
REGION_NAME=RegionTwo
|
|
||||||
SYSINV_USER_NAME=sysinv
|
|
||||||
SYSINV_PASSWORD=password2WO*
|
|
||||||
SYSINV_SERVICE_NAME=sysinv
|
|
||||||
SYSINV_SERVICE_TYPE=platform
|
|
||||||
PATCHING_USER_NAME=patching
|
|
||||||
PATCHING_PASSWORD=password2WO*
|
|
||||||
PATCHING_SERVICE_NAME=patching
|
|
||||||
PATCHING_SERVICE_TYPE=patching
|
|
||||||
NFV_USER_NAME=vim
|
|
||||||
NFV_PASSWORD=password2WO*
|
|
||||||
MTCE_USER_NAME=mtce
|
|
||||||
MTCE_PASSWORD=password2WO*
|
|
||||||
FM_USER_NAME=fm
|
|
||||||
FM_PASSWORD=password2WO*
|
|
||||||
BARBICAN_USER_NAME=barbican
|
|
||||||
BARBICAN_PASSWORD=barbican2WO*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,73 +0,0 @@
|
||||||
[cSYSTEM]
|
|
||||||
TIMEZONE = UTC
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
[cPXEBOOT]
|
|
||||||
PXECONTROLLER_FLOATING_HOSTNAME = pxecontroller
|
|
||||||
|
|
||||||
[cMGMT]
|
|
||||||
MANAGEMENT_MTU = 1500
|
|
||||||
MANAGEMENT_SUBNET = 192.168.204.0/24
|
|
||||||
LAG_MANAGEMENT_INTERFACE = no
|
|
||||||
MANAGEMENT_INTERFACE = eth1
|
|
||||||
MANAGEMENT_INTERFACE_NAME = eth1
|
|
||||||
CONTROLLER_FLOATING_ADDRESS = 192.168.204.102
|
|
||||||
CONTROLLER_0_ADDRESS = 192.168.204.103
|
|
||||||
CONTROLLER_1_ADDRESS = 192.168.204.104
|
|
||||||
NFS_MANAGEMENT_ADDRESS_1 = 192.168.204.105
|
|
||||||
NFS_MANAGEMENT_ADDRESS_2 = 192.168.204.106
|
|
||||||
CONTROLLER_FLOATING_HOSTNAME = controller
|
|
||||||
CONTROLLER_HOSTNAME_PREFIX = controller-
|
|
||||||
OAMCONTROLLER_FLOATING_HOSTNAME = oamcontroller
|
|
||||||
DYNAMIC_ADDRESS_ALLOCATION = no
|
|
||||||
MANAGEMENT_START_ADDRESS = 192.168.204.102
|
|
||||||
MANAGEMENT_END_ADDRESS = 192.168.204.199
|
|
||||||
MANAGEMENT_MULTICAST_SUBNET = 239.1.1.0/28
|
|
||||||
|
|
||||||
[cEXT_OAM]
|
|
||||||
EXTERNAL_OAM_MTU = 1500
|
|
||||||
EXTERNAL_OAM_SUBNET = 10.10.10.0/24
|
|
||||||
LAG_EXTERNAL_OAM_INTERFACE = no
|
|
||||||
EXTERNAL_OAM_INTERFACE = eth0
|
|
||||||
EXTERNAL_OAM_INTERFACE_NAME = eth0
|
|
||||||
EXTERNAL_OAM_GATEWAY_ADDRESS = 10.10.10.1
|
|
||||||
EXTERNAL_OAM_FLOATING_ADDRESS = 10.10.10.2
|
|
||||||
EXTERNAL_OAM_0_ADDRESS = 10.10.10.3
|
|
||||||
EXTERNAL_OAM_1_ADDRESS = 10.10.10.4
|
|
||||||
|
|
||||||
[cREGION]
|
|
||||||
REGION_CONFIG = True
|
|
||||||
REGION_1_NAME = RegionOne
|
|
||||||
REGION_2_NAME = RegionTwo
|
|
||||||
ADMIN_USER_NAME = admin
|
|
||||||
ADMIN_USER_DOMAIN = Default
|
|
||||||
ADMIN_PROJECT_NAME = admin
|
|
||||||
ADMIN_PROJECT_DOMAIN = Default
|
|
||||||
SERVICE_PROJECT_NAME = service
|
|
||||||
KEYSTONE_SERVICE_NAME = keystone
|
|
||||||
KEYSTONE_SERVICE_TYPE = identity
|
|
||||||
PATCHING_USER_NAME = patching
|
|
||||||
PATCHING_PASSWORD = password2WO*
|
|
||||||
SYSINV_USER_NAME = sysinv
|
|
||||||
SYSINV_PASSWORD = password2WO*
|
|
||||||
SYSINV_SERVICE_NAME = sysinv
|
|
||||||
SYSINV_SERVICE_TYPE = platform
|
|
||||||
NFV_USER_NAME = vim
|
|
||||||
NFV_PASSWORD = password2WO*
|
|
||||||
MTCE_USER_NAME = mtce
|
|
||||||
MTCE_PASSWORD = password2WO*
|
|
||||||
FM_USER_NAME = fm
|
|
||||||
FM_PASSWORD = password2WO*
|
|
||||||
BARBICAN_USER_NAME = barbican
|
|
||||||
BARBICAN_PASSWORD = barbican2WO*
|
|
||||||
USER_DOMAIN_NAME = Default
|
|
||||||
PROJECT_DOMAIN_NAME = Default
|
|
||||||
KEYSTONE_AUTH_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_IDENTITY_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_ADMIN_URI = http://192.168.204.12:8081/keystone/admin/v2.0
|
|
||||||
KEYSTONE_INTERNAL_URI = http://192.168.204.12:8081/keystone/main/v2.0
|
|
||||||
KEYSTONE_PUBLIC_URI = http://10.10.10.2:8081/keystone/main/v2.0
|
|
||||||
|
|
||||||
[cAUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD = Li69nux*
|
|
||||||
|
|
|
@ -1,55 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE = duplex
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
;VLAN=123
|
|
||||||
IP_START_ADDRESS=192.168.204.2
|
|
||||||
IP_END_ADDRESS=192.168.204.99
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
DYNAMIC_ALLOCATION=Y
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
IP_START_ADDRESS=10.10.10.2
|
|
||||||
IP_END_ADDRESS=10.10.10.4
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,53 +0,0 @@
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
VLAN=123
|
|
||||||
CIDR=1234::/64
|
|
||||||
MULTICAST_CIDR=ff08::1:1:0/124
|
|
||||||
DYNAMIC_ALLOCATION=Y
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
;IP_START_ADDRESS=abcd::2
|
|
||||||
;IP_END_ADDRESS=abcd::4
|
|
||||||
IP_FLOATING_ADDRESS=abcd::2
|
|
||||||
IP_UNIT_0_ADDRESS=abcd::3
|
|
||||||
IP_UNIT_1_ADDRESS=abcd::4
|
|
||||||
CIDR=abcd::/64
|
|
||||||
GATEWAY=abcd::1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
[PXEBOOT_NETWORK]
|
|
||||||
PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,70 +0,0 @@
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_LINK_CAPACITY=1000
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
;INTERFACE_LINK_CAPACITY=
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
;VLAN=123
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
DYNAMIC_ALLOCATION=Y
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CLUSTER_NETWORK]
|
|
||||||
CIDR=192.168.206.0/24
|
|
||||||
DYNAMIC_ALLOCATION=Y
|
|
||||||
IP_START_ADDRESS=192.168.206.2
|
|
||||||
IP_END_ADDRESS=192.168.206.245
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
;IP_START_ADDRESS=10.10.10.2
|
|
||||||
;IP_END_ADDRESS=10.10.10.4
|
|
||||||
IP_FLOATING_ADDRESS=10.10.10.20
|
|
||||||
IP_UNIT_0_ADDRESS=10.10.10.30
|
|
||||||
IP_UNIT_1_ADDRESS=10.10.10.40
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
[DNS]
|
|
||||||
# DNS Configuration
|
|
||||||
NAMESERVER_1=1.2.3.4
|
|
||||||
NAMESERVER_2=5.6.7.8
|
|
||||||
|
|
||||||
[DOCKER_PROXY]
|
|
||||||
# Docker Proxy Configuration
|
|
||||||
DOCKER_HTTP_PROXY=http://proxy.com:123
|
|
||||||
DOCKER_HTTPS_PROXY=https://proxy.com:123
|
|
||||||
DOCKER_NO_PROXY=localhost,127.0.0.1,192.168.204.2
|
|
||||||
|
|
||||||
[DOCKER_REGISTRY]
|
|
||||||
# Docker Registry Configuration
|
|
||||||
DOCKER_K8S_REGISTRY=my.registry.com:5000
|
|
||||||
DOCKER_GCR_REGISTRY=my.registry.com
|
|
||||||
DOCKER_QUAY_REGISTRY=1.2.3.4:5000
|
|
||||||
DOCKER_DOCKER_REGISTRY=[1:2:3:4:a:b:c:d]:5000
|
|
||||||
IS_SECURE_REGISTRY=False
|
|
||||||
|
|
||||||
;[PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,55 +0,0 @@
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_MODE=duplex
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=Y
|
|
||||||
LAG_MODE=4
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1,eth2
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
VLAN=123
|
|
||||||
IP_START_ADDRESS=192.168.204.102
|
|
||||||
IP_END_ADDRESS=192.168.204.199
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[CLUSTER_NETWORK]
|
|
||||||
VLAN=126
|
|
||||||
IP_START_ADDRESS=192.168.206.102
|
|
||||||
IP_END_ADDRESS=192.168.206.199
|
|
||||||
CIDR=192.168.206.0/24
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
VLAN=125
|
|
||||||
IP_START_ADDRESS=10.10.10.2
|
|
||||||
IP_END_ADDRESS=10.10.10.4
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
;GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[PXEBOOT_NETWORK]
|
|
||||||
PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,49 +0,0 @@
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_LINK_CAPACITY=1000
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
;INTERFACE_LINK_CAPACITY=
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[PXEBOOT_NETWORK]
|
|
||||||
PXEBOOT_CIDR=192.168.102.0/24
|
|
||||||
IP_START_ADDRESS=192.168.102.32
|
|
||||||
IP_END_ADDRESS=192.168.102.54
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
VLAN=123
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
DYNAMIC_ALLOCATION=Y
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
;IP_START_ADDRESS=10.10.10.2
|
|
||||||
;IP_END_ADDRESS=10.10.10.4
|
|
||||||
IP_FLOATING_ADDRESS=10.10.10.20
|
|
||||||
IP_UNIT_0_ADDRESS=10.10.10.30
|
|
||||||
IP_UNIT_1_ADDRESS=10.10.10.40
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,51 +0,0 @@
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
;VLAN=123
|
|
||||||
IP_START_ADDRESS=192.168.204.102
|
|
||||||
IP_END_ADDRESS=192.168.204.199
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
IP_START_ADDRESS=10.10.10.2
|
|
||||||
IP_END_ADDRESS=10.10.10.4
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
VLAN=1
|
|
||||||
MTU=1496
|
|
||||||
SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,63 +0,0 @@
|
||||||
;[DNS]
|
|
||||||
;NAMESERVER_1=8.8.8.8
|
|
||||||
;NAMESERVER_2=8.8.4.4
|
|
||||||
;NAMESERVER_3=
|
|
||||||
|
|
||||||
;[NTP]
|
|
||||||
;NTP_SERVER_1=0.pool.ntp.org
|
|
||||||
;NTP_SERVER_2=1.pool.ntp.org
|
|
||||||
;NTP_SERVER_3=2.pool.ntp.org
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
;VLAN=123
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
DYNAMIC_ALLOCATION=Y
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
;IP_START_ADDRESS=10.10.10.2
|
|
||||||
;IP_END_ADDRESS=10.10.10.4
|
|
||||||
IP_FLOATING_ADDRESS=10.10.10.20
|
|
||||||
IP_UNIT_0_ADDRESS=10.10.10.30
|
|
||||||
IP_UNIT_1_ADDRESS=10.10.10.40
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,46 +0,0 @@
|
||||||
;[DNS]
|
|
||||||
;NAMESERVER_1=8.8.8.8
|
|
||||||
;NAMESERVER_2=8.8.4.4
|
|
||||||
;NAMESERVER_3=
|
|
||||||
|
|
||||||
;[NTP]
|
|
||||||
;NTP_SERVER_1=0.pool.ntp.org
|
|
||||||
;NTP_SERVER_2=1.pool.ntp.org
|
|
||||||
;NTP_SERVER_3=2.pool.ntp.org
|
|
||||||
|
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
IP_ADDRESS=10.10.10.20
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
||||||
|
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_TYPE=All-in-one
|
|
||||||
SYSTEM_MODE=simplex
|
|
|
@ -1,24 +0,0 @@
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
CIDR=192.168.42.0/28
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
IP_ADDRESS=10.10.10.20
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
||||||
|
|
||||||
[SYSTEM]
|
|
||||||
SYSTEM_TYPE=All-in-one
|
|
||||||
SYSTEM_MODE=simplex
|
|
|
@ -1,52 +0,0 @@
|
||||||
;LOGICAL_INTERFACE_<number>
|
|
||||||
; LAG_INTERFACE <Y/N>
|
|
||||||
; LAG_MODE One of 1) Active-backup policy
|
|
||||||
; 2) Balanced XOR policy
|
|
||||||
; 4) 802.3ad (LACP) policy
|
|
||||||
; Interface for pxebooting can only be LACP
|
|
||||||
; INTERFACE_MTU <mtu size>
|
|
||||||
; INTERFACE_PORTS <comma separated list of ethernet interfaces>
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_1]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth1
|
|
||||||
|
|
||||||
[LOGICAL_INTERFACE_2]
|
|
||||||
LAG_INTERFACE=N
|
|
||||||
;LAG_MODE=
|
|
||||||
INTERFACE_MTU=1500
|
|
||||||
INTERFACE_PORTS=eth0
|
|
||||||
|
|
||||||
[MGMT_NETWORK]
|
|
||||||
;VLAN=123
|
|
||||||
IP_START_ADDRESS=192.168.204.20
|
|
||||||
IP_END_ADDRESS=192.168.204.99
|
|
||||||
CIDR=192.168.204.0/24
|
|
||||||
MULTICAST_CIDR=239.1.1.0/28
|
|
||||||
DYNAMIC_ALLOCATION=N
|
|
||||||
;GATEWAY=192.168.204.12
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_1
|
|
||||||
|
|
||||||
[OAM_NETWORK]
|
|
||||||
;VLAN=
|
|
||||||
IP_START_ADDRESS=10.10.10.2
|
|
||||||
IP_END_ADDRESS=10.10.10.4
|
|
||||||
CIDR=10.10.10.0/24
|
|
||||||
GATEWAY=10.10.10.1
|
|
||||||
LOGICAL_INTERFACE=LOGICAL_INTERFACE_2
|
|
||||||
|
|
||||||
;[PXEBOOT_NETWORK]
|
|
||||||
;PXEBOOT_CIDR=192.168.203.0/24
|
|
||||||
|
|
||||||
;[BOARD_MANAGEMENT_NETWORK]
|
|
||||||
;VLAN=1
|
|
||||||
;MTU=1496
|
|
||||||
;SUBNET=192.168.203.0/24
|
|
||||||
|
|
||||||
[AUTHENTICATION]
|
|
||||||
ADMIN_PASSWORD=Li69nux*
|
|
||||||
|
|
||||||
[VERSION]
|
|
||||||
RELEASE = TEST.SW.VERSION
|
|
|
@ -1,103 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright (c) 2014 Wind River Systems, Inc.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import difflib
|
|
||||||
import filecmp
|
|
||||||
import os
|
|
||||||
from mock import patch
|
|
||||||
|
|
||||||
import controllerconfig.configassistant as ca
|
|
||||||
import controllerconfig.common.constants as constants
|
|
||||||
|
|
||||||
|
|
||||||
@patch('controllerconfig.configassistant.get_rootfs_node')
|
|
||||||
@patch('controllerconfig.configassistant.get_net_device_list')
|
|
||||||
def _test_answerfile(tmpdir, filename,
|
|
||||||
mock_get_net_device_list,
|
|
||||||
mock_get_rootfs_node,
|
|
||||||
compare_results=True,
|
|
||||||
ca_options={}):
|
|
||||||
""" Test import and generation of answerfile """
|
|
||||||
mock_get_net_device_list.return_value = \
|
|
||||||
['eth0', 'eth1', 'eth2']
|
|
||||||
mock_get_rootfs_node.return_value = '/dev/sda'
|
|
||||||
|
|
||||||
assistant = ca.ConfigAssistant(**ca_options)
|
|
||||||
|
|
||||||
# Create the path to the answerfile
|
|
||||||
answerfile = os.path.join(
|
|
||||||
os.getcwd(), "controllerconfig/tests/files/", filename)
|
|
||||||
|
|
||||||
# Input the config from the answerfile
|
|
||||||
assistant.input_config_from_file(answerfile)
|
|
||||||
|
|
||||||
# Test the display method
|
|
||||||
print("Output from display_config:")
|
|
||||||
assistant.display_config()
|
|
||||||
|
|
||||||
# Ensure we can write the configuration
|
|
||||||
constants.CONFIG_WORKDIR = os.path.join(str(tmpdir), 'config_workdir')
|
|
||||||
constants.CGCS_CONFIG_FILE = os.path.join(constants.CONFIG_WORKDIR,
|
|
||||||
'cgcs_config')
|
|
||||||
assistant.write_config_file()
|
|
||||||
|
|
||||||
# Add the password to the generated file so it can be compared with the
|
|
||||||
# answerfile
|
|
||||||
with open(constants.CGCS_CONFIG_FILE, 'a') as f:
|
|
||||||
f.write("\n[cAUTHENTICATION]\nADMIN_PASSWORD=Li69nux*\n")
|
|
||||||
|
|
||||||
# Do a diff between the answerfile and the generated config file
|
|
||||||
print("\n\nDiff of answerfile vs. generated config file:\n")
|
|
||||||
with open(answerfile) as a, open(constants.CGCS_CONFIG_FILE) as b:
|
|
||||||
a_lines = a.readlines()
|
|
||||||
b_lines = b.readlines()
|
|
||||||
|
|
||||||
differ = difflib.Differ()
|
|
||||||
diff = differ.compare(a_lines, b_lines)
|
|
||||||
print(''.join(diff))
|
|
||||||
|
|
||||||
if compare_results:
|
|
||||||
# Fail the testcase if the answerfile and generated config file don't
|
|
||||||
# match.
|
|
||||||
assert filecmp.cmp(answerfile, constants.CGCS_CONFIG_FILE)
|
|
||||||
|
|
||||||
|
|
||||||
def test_answerfile_default(tmpdir):
|
|
||||||
""" Test import of answerfile with default values """
|
|
||||||
|
|
||||||
_test_answerfile(tmpdir, "cgcs_config.default")
|
|
||||||
|
|
||||||
|
|
||||||
def test_answerfile_ipv6(tmpdir):
|
|
||||||
""" Test import of answerfile with ipv6 oam values """
|
|
||||||
|
|
||||||
_test_answerfile(tmpdir, "cgcs_config.ipv6")
|
|
||||||
|
|
||||||
|
|
||||||
def test_answerfile_ceph(tmpdir):
|
|
||||||
""" Test import of answerfile with ceph backend values """
|
|
||||||
|
|
||||||
_test_answerfile(tmpdir, "cgcs_config.ceph")
|
|
||||||
|
|
||||||
|
|
||||||
def test_answerfile_region(tmpdir):
|
|
||||||
""" Test import of answerfile with region values """
|
|
||||||
|
|
||||||
_test_answerfile(tmpdir, "cgcs_config.region")
|
|
||||||
|
|
||||||
|
|
||||||
def test_answerfile_region_nuage_vrs(tmpdir):
|
|
||||||
""" Test import of answerfile with region values for nuage_vrs"""
|
|
||||||
|
|
||||||
_test_answerfile(tmpdir, "cgcs_config.region_nuage_vrs")
|
|
||||||
|
|
||||||
|
|
||||||
def test_answerfile_kubernetes(tmpdir):
|
|
||||||
""" Test import of answerfile with kubernetes values """
|
|
||||||
|
|
||||||
_test_answerfile(tmpdir, "cgcs_config.kubernetes",
|
|
||||||
ca_options={"kubernetes": True})
|
|
|
@ -1,759 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright (c) 2014-2019 Wind River Systems, Inc.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
from six.moves import configparser
|
|
||||||
import difflib
|
|
||||||
import filecmp
|
|
||||||
import fileinput
|
|
||||||
import mock
|
|
||||||
from mock import patch
|
|
||||||
import os
|
|
||||||
import pytest
|
|
||||||
import shutil
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import controllerconfig.common.exceptions as exceptions
|
|
||||||
from controllerconfig import REGION_CONFIG
|
|
||||||
from controllerconfig import validate
|
|
||||||
import controllerconfig.common.keystone as keystone
|
|
||||||
from controllerconfig.tests import test_answerfile
|
|
||||||
|
|
||||||
sys.modules['fm_core'] = mock.Mock()
|
|
||||||
|
|
||||||
import controllerconfig.systemconfig as cr # noqa: E402
|
|
||||||
|
|
||||||
# Canned keystone "service list" response. _test_region_config wraps this
# in keystone.ServiceList when generating the cgcs_config answerfile.
# The u'id' values are cross-referenced by the u'service_id' fields in
# FAKE_ENDPOINT_DATA below.
FAKE_SERVICE_DATA = {u'services': [
    {u'type': u'keystore', u'description': u'Barbican Key Management Service',
     u'enabled': True, u'id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'name': u'barbican'},
    {u'type': u'network', u'description': u'OpenStack Networking service',
     u'enabled': True, u'id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'name': u'neutron'},
    {u'type': u'cloudformation',
     u'description': u'OpenStack Cloudformation Service',
     u'enabled': True, u'id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'name': u'heat-cfn'},
    {u'type': u'object-store', u'description': u'OpenStack object-store',
     u'enabled': True, u'id': u'd588956f759f4bbda9e65a1019902b9c',
     u'name': u'swift'},
    {u'type': u'volumev2',
     u'description': u'OpenStack Volume Service v2.0 API',
     u'enabled': True, u'id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'name': u'cinderv2'},
    {u'type': u'volume', u'description': u'OpenStack Volume Service',
     u'enabled': True, u'id': u'505aa37457774e55b545654aa8630822',
     u'name': u'cinder'},
    {u'type': u'orchestration',
     u'description': u'OpenStack Orchestration Service',
     u'enabled': True, u'id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'name': u'heat'},
    {u'type': u'compute', u'description': u'OpenStack Compute Service',
     u'enabled': True, u'id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'name': u'nova'},
    {u'type': u'identity', u'description': u'OpenStack Identity',
     u'enabled': True, u'id': u'1fe7b1de187b47228fe853fbbd149664',
     u'name': u'keystone'},
    {u'type': u'image', u'description': u'OpenStack Image Service',
     u'enabled': True, u'id': u'd41750c98a864fdfb25c751b4ad84996',
     u'name': u'glance'},
    {u'type': u'database', u'description': u'Trove Database As A Service',
     u'enabled': True, u'id': u'82265e39a77b4097bd8aee4f78e13867',
     u'name': u'trove'},
    {u'type': u'patching', u'description': u'Patching Service',
     u'enabled': True, u'id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'name': u'patching'},
    {u'type': u'platform', u'description': u'SysInv Service', u'enabled': True,
     u'id': u'08758bed8d894ddaae744a97db1080b3', u'name': u'sysinv'},
    {u'type': u'computev3', u'description': u'Openstack Compute Service v3',
     u'enabled': True, u'id': u'959f2214543a47549ffd8c66f98d27d4',
     u'name': u'novav3'}]}
|
|
||||||
|
|
||||||
# Canned keystone "endpoint list" response. _test_region_config wraps this
# in keystone.EndpointList when generating the cgcs_config answerfile.
# Each u'service_id' refers to a u'id' in FAKE_SERVICE_DATA; endpoints
# come in admin/internal/public triples per service and region.
FAKE_ENDPOINT_DATA = {u'endpoints': [
    {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'505aa37457774e55b545654aa8630822',
     u'id': u'de19beb4a4924aa1ba25af3ee64e80a0',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8776/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'505aa37457774e55b545654aa8630822',
     u'id': u'de19beb4a4924aa1ba25af3ee64e80a1',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8776/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'505aa37457774e55b545654aa8630822',
     u'id': u'de19beb4a4924aa1ba25af3ee64e80a2',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'373259a6bbcf493b86c9f9530e86d323',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8774/v2/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'373259a6bbcf493b86c9f9530e86d324',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8774/v2/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'373259a6bbcf493b86c9f9530e86d324',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'c51dc9354b5a41c9883ec3871b9fd271',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8004/v1/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'c51dc9354b5a41c9883ec3871b9fd272',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8004/v1/%(tenant_id)s',
     u'region': u'RegionTwo', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'c51dc9354b5a41c9883ec3871b9fd273',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne',
     u'enabled': True, u'interface': u'admin',
     u'id': u'e132bb9dd0fe459687c3b04074bcb1ac',
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
    {u'url': u'http://192.168.204.12:8000/v1', u'region': u'RegionOne',
     u'enabled': True, u'interface': u'internal',
     u'id': u'e132bb9dd0fe459687c3b04074bcb1ad',
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},
    {u'url': u'http://10.10.10.2:8000/v1', u'region': u'RegionOne',
     u'enabled': True, u'interface': u'public',
     u'id': u'e132bb9dd0fe459687c3b04074bcb1ae',
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa'},

    {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
     u'id': u'031bfbfd581f4a42b361f93fdc4fe266',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8774/v3', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
     u'id': u'031bfbfd581f4a42b361f93fdc4fe267',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8774/v3', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'959f2214543a47549ffd8c66f98d27d4',
     u'id': u'031bfbfd581f4a42b361f93fdc4fe268',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8081/keystone/admin/v2.0',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
     u'id': u'6fa36df1cc4f4e97a1c12767c8a1159f',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8081/keystone/main/v2.0',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
     u'id': u'6fa36df1cc4f4e97a1c12767c8a11510',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8081/keystone/main/v2.0',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'1fe7b1de187b47228fe853fbbd149664',
     u'id': u'6fa36df1cc4f4e97a1c12767c8a11512',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'74a7a918dd854b66bb33f1e4e0e768bc',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:9696/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'74a7a918dd854b66bb33f1e4e0e768bd',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:9696/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'74a7a918dd854b66bb33f1e4e0e768be',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'08758bed8d894ddaae744a97db1080b3',
     u'id': u'd8ae3a69f08046d1a8f031bbd65381a3',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:6385/v1', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'08758bed8d894ddaae744a97db1080b3',
     u'id': u'd8ae3a69f08046d1a8f031bbd65381a4',
     u'interface': u'internal'},
    # NOTE(review): this public endpoint's service_id ends in ...b5, unlike
    # the ...b3 used by the admin/internal entries above — preserved as-is.
    {u'url': u'http://10.10.10.2:6385/v1', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'08758bed8d894ddaae744a97db1080b5',
     u'id': u'd8ae3a69f08046d1a8f031bbd65381a3',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'61ad227efa3b4cdd867618041a7064dc',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8004/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'61ad227efa3b4cdd867618041a7064dd',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8004/v1/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'5765bee52eec43bb8e0632ecb225d0e3',
     u'id': u'61ad227efa3b4cdd867618041a7064de',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8888/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
     u'id': u'be557ddb742e46328159749a21e6e286',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8888/v1/AUTH_$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
     u'id': u'be557ddb742e46328159749a21e6e287',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:8888/v1/AUTH_$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'd588956f759f4bbda9e65a1019902b9c',
     u'id': u'be557ddb742e46328159749a21e6e288',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'050d07db8c5041288f29020079177f0b',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8777', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'050d07db8c5041288f29020079177f0c',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8777', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'050d07db8c5041288f29020079177f0d',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'id': u'53af565e4d7245929df7af2ba0ff46db',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:5491', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'id': u'53af565e4d7245929df7af2ba0ff46dc',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:5491', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'8515c4f28f9346199eb8704bca4f5db4',
     u'id': u'53af565e4d7245929df7af2ba0ff46dd',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
     u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8779/v1.0/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
     u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8779/v1.0/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'82265e39a77b4097bd8aee4f78e13867',
     u'id': u'9a1cc90a7ac342d0900a0449ca4eabfe',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10a',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:9292/v2', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10b',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:9292/v2', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10c',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10a',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:9292/v2', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10b',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:9292/v2', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'd41750c98a864fdfb25c751b4ad84996',
     u'id': u'06fdb367cb63414987ee1653a016d10c',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'f15d22a9526648ff8833460e2dce1431',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8777/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'f15d22a9526648ff8833460e2dce1432',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:8777/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'4c07eadd3d0c45eb9a3b1507baa278ba',
     u'id': u'f15d22a9526648ff8833460e2dce1433',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'id': u'5e6c6ffdbcd544f8838430937a0d81a7',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.102:8000/v1/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'id': u'5e6c6ffdbcd544f8838430937a0d81a8',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8000/v1/', u'region': u'RegionTwo',
     u'enabled': True,
     u'service_id': u'abbf431acb6d45919cfbefe55a0f27fa',
     u'id': u'5e6c6ffdbcd544f8838430937a0d81a9',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'87dc648502ee49fb86a4ca87d8d6028d',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8774/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'87dc648502ee49fb86a4ca87d8d6028e',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.2:8774/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'9c46a6ea929f4c52bc92dd9bb9f852ac',
     u'id': u'87dc648502ee49fb86a4ca87d8d6028f',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'd326bf63f6f94b12924b03ff42ba63bd',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:9696/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'd326bf63f6f94b12924b03ff42ba63be',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:9696/', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'85a8a3342a644df193af4b68d5b65ce5',
     u'id': u'd326bf63f6f94b12924b03ff42ba63bf',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'id': u'61b8bb77edf644f1ad4edf9b953d44c7',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:8776/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'id': u'61b8bb77edf644f1ad4edf9b953d44c8',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:8776/v2/$(tenant_id)s',
     u'region': u'RegionOne', u'enabled': True,
     u'service_id': u'e6e356112daa4af588d9b9dadcf98bc4',
     u'id': u'61b8bb77edf644f1ad4edf9b953d44c9',
     u'interface': u'public'},

    {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'id': u'a1aa2af22caf460eb421d75ab1ce6125',
     u'interface': u'admin'},
    {u'url': u'http://192.168.204.12:9312/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'id': u'a1aa2af22caf460eb421d75ab1ce6126',
     u'interface': u'internal'},
    {u'url': u'http://10.10.10.12:9312/v1', u'region': u'RegionOne',
     u'enabled': True,
     u'service_id': u'9029af23540f4eecb0b7f70ac5e00152',
     u'id': u'a1aa2af22caf460eb421d75ab1ce6127',
     u'interface': u'public'}]}
|
|
||||||
|
|
||||||
# Canned keystone "domain list" response. _test_region_config wraps this
# in keystone.DomainList when generating the cgcs_config answerfile.
FAKE_DOMAIN_DATA = {u'domains': [
    {u'id': u'default', u'enabled': True,
     u'description':
         u'Owns users and tenants (i.e. projects) available on Identity API '
         u'v2.',
     u'links': {
         u'self':
             u'http://192.168.204.12:8081/keystone/main/v3/domains/default'},
     u'name': u'Default'},
    {u'id': u'05d847889e9a4cb9aa94f541eb6b9e2e',
     u'enabled': True,
     u'description': u'Contains users and projects created by heat',
     u'links': {
         u'self':
             u'http://192.168.204.12:8081/keystone/main/v3/domains/'
             u'05d847889e9a4cb9aa94f541eb6b9e2e'},
     u'name': u'heat'}],
    u'links': {
        u'self': u'http://192.168.204.12:8081/keystone/main/v3/domains',
        u'next': None,
        u'previous': None}}
|
|
||||||
|
|
||||||
|
|
||||||
def _dump_config(config):
|
|
||||||
""" Prints contents of config object """
|
|
||||||
for section in config.sections():
|
|
||||||
print("[%s]" % section)
|
|
||||||
for (name, value) in config.items(section):
|
|
||||||
print("%s=%s" % (name, value))
|
|
||||||
|
|
||||||
|
|
||||||
def _replace_in_file(filename, old, new):
|
|
||||||
""" Replaces old with new in file filename. """
|
|
||||||
for line in fileinput.FileInput(filename, inplace=1):
|
|
||||||
line = line.replace(old, new)
|
|
||||||
print(line, end='')
|
|
||||||
fileinput.close()
|
|
||||||
|
|
||||||
|
|
||||||
@patch('controllerconfig.configassistant.ConfigAssistant.get_sysadmin_sig')
def _test_region_config(tmpdir, inputfile, resultfile,
                        mock_get_sysadmin_sig):
    """Parse a region_config file, generate a cgcs_config answerfile and
    compare it to the expected results.

    :param tmpdir: pytest temporary directory for scratch files
    :param inputfile: path of the region_config file to import
    :param resultfile: path of the expected generated answerfile
    :param mock_get_sysadmin_sig: injected by the @patch decorator; forced
        to return None so no real sysadmin signature is computed
    """

    mock_get_sysadmin_sig.return_value = None

    # Create the path to the output file
    outputfile = os.path.join(str(tmpdir), 'output')

    # Parse the region_config file
    region_config = cr.parse_system_config(inputfile)

    # Dump results for debugging
    print("Parsed region_config:\n")
    _dump_config(region_config)

    # Validate the region config file and generate the answerfile, feeding
    # in the canned keystone service/endpoint/domain fixtures above
    cr.create_cgcs_config_file(outputfile, region_config,
                               keystone.ServiceList(FAKE_SERVICE_DATA),
                               keystone.EndpointList(FAKE_ENDPOINT_DATA),
                               keystone.DomainList(FAKE_DOMAIN_DATA))

    # Make a local copy of the results file
    local_resultfile = os.path.join(str(tmpdir), 'result')
    shutil.copyfile(resultfile, local_resultfile)

    # Print a diff of output vs. expected results to aid debugging failures
    print("\n\nDiff of output file vs. expected results file:\n")
    with open(outputfile) as a, open(local_resultfile) as b:
        a_lines = a.readlines()
        b_lines = b.readlines()

        differ = difflib.Differ()
        diff = differ.compare(a_lines, b_lines)
        print(''.join(diff))
    # Fail the testcase if the output doesn't match the expected results
    assert filecmp.cmp(outputfile, local_resultfile)

    # Now test that configassistant can parse this answerfile. We can't
    # compare the resulting cgcs_config file because the ordering, spacing
    # and comments are different between the answerfile generated by
    # systemconfig and ConfigAssistant.
    test_answerfile._test_answerfile(tmpdir, outputfile, compare_results=False)

    # Validate the region config file.
    # Using onboard validation since the validator's reference version number
    # is only set at build-time when validating offboard
    validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_simple(tmpdir):
    """Run a plain region_config file through the import/compare helper."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.simple")
    resultfile = os.path.join(files_dir, "region_config.simple.result")

    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_simple_can_ips(tmpdir):
    """Test import of a simple region_config file with unit IPs for CAN.

    Reuses the plain simple expected-results file; only the input config
    differs. (A leftover debug print was removed from this test.)
    """
    regionfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.simple.can_ips")
    resultfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "region_config.simple.result")

    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_lag_vlan(tmpdir):
    """Run a region_config file using LAG and VLAN interfaces through the helper."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.lag.vlan")
    resultfile = os.path.join(files_dir, "region_config.lag.vlan.result")

    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_security(tmpdir):
    """Run a region_config file carrying security config through the helper."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.security")
    resultfile = os.path.join(files_dir, "region_config.security.result")
    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_nuage_vrs(tmpdir):
    """Run a region_config file with nuage vrs config through the helper."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "region_config.nuage_vrs")
    resultfile = os.path.join(files_dir, "region_config.nuage_vrs.result")
    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_share_keystone_only(tmpdir):
    """Run a Titanium Cloud region_config file with a shared keystone
    through the import/compare helper."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir,
                              "TiS_region_config.share.keystoneonly")
    resultfile = os.path.join(files_dir,
                              "TiS_region_config.share.keystoneonly.result")
    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_share_keystone_glance_cinder(tmpdir):
    """Run a Titanium Cloud region_config file sharing keystone, glance and
    cinder through the import/compare helper."""
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    regionfile = os.path.join(files_dir, "TiS_region_config.shareall")
    resultfile = os.path.join(files_dir, "TiS_region_config.shareall.result")
    _test_region_config(tmpdir, regionfile, resultfile)
|
|
||||||
|
|
||||||
|
|
||||||
def test_region_config_validation():
|
|
||||||
""" Test detection of various errors in region_config file """
|
|
||||||
|
|
||||||
# Create the path to the region_config files
|
|
||||||
simple_regionfile = os.path.join(
|
|
||||||
os.getcwd(), "controllerconfig/tests/files/", "region_config.simple")
|
|
||||||
lag_vlan_regionfile = os.path.join(
|
|
||||||
os.getcwd(), "controllerconfig/tests/files/", "region_config.lag.vlan")
|
|
||||||
|
|
||||||
# Test detection of non-required CINDER_* parameters
|
|
||||||
region_config = cr.parse_system_config(simple_regionfile)
|
|
||||||
region_config.set('STORAGE', 'CINDER_BACKEND', 'lvm')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, True)
|
|
||||||
|
|
||||||
region_config = cr.parse_system_config(simple_regionfile)
|
|
||||||
region_config.set('STORAGE', 'CINDER_DEVICE',
|
|
||||||
'/dev/disk/by-path/pci-0000:00:0d.0-ata-3.0')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
region_config = cr.parse_system_config(simple_regionfile)
|
|
||||||
region_config.set('STORAGE', 'CINDER_STORAGE', '10')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test detection of an invalid PXEBOOT_CIDR
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
|
|
||||||
'FD00::0000/64')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
region_config.set('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
|
|
||||||
'192.168.1.0/29')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
region_config.remove_option('REGION2_PXEBOOT_NETWORK', 'PXEBOOT_CIDR')
|
|
||||||
with pytest.raises(configparser.NoOptionError):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(configparser.NoOptionError):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test overlap of CLM_CIDR
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.set('CLM_NETWORK', 'CLM_CIDR', '192.168.203.0/26')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test invalid CLM LAG_MODE
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test CLM_VLAN not allowed
|
|
||||||
region_config = cr.parse_system_config(simple_regionfile)
|
|
||||||
region_config.set('CLM_NETWORK', 'CLM_VLAN', '123')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test CLM_VLAN missing
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.remove_option('CLM_NETWORK', 'CLM_VLAN')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test overlap of CAN_CIDR
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.203.0/26')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.204.0/26')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
region_config.set('CAN_NETWORK', 'CAN_CIDR', '192.168.205.0/26')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test invalid CAN LAG_MODE
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.add_section('LOGICAL_INTERFACE_2')
|
|
||||||
region_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
|
|
||||||
region_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
|
|
||||||
region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
|
|
||||||
region_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
|
|
||||||
region_config.set('CAN_NETWORK', 'CAN_LOGICAL_INTERFACE',
|
|
||||||
'LOGICAL_INTERFACE_2')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test CAN_VLAN overlap
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.set('CAN_NETWORK', 'CAN_VLAN', '123')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test CAN_VLAN missing
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.remove_option('CAN_NETWORK', 'CAN_VLAN')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test missing gateway
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.remove_option('CLM_NETWORK', 'CLM_GATEWAY')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test two gateways
|
|
||||||
region_config = cr.parse_system_config(lag_vlan_regionfile)
|
|
||||||
region_config.set('CAN_NETWORK', 'CAN_GATEWAY', '10.10.10.1')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, region_config, None, None, None,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(region_config, REGION_CONFIG, None, False)
|
|
|
@ -1,601 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright (c) 2014-2019 Wind River Systems, Inc.
|
|
||||||
|
|
||||||
SPDX-License-Identifier: Apache-2.0
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from six.moves import configparser
|
|
||||||
import mock
|
|
||||||
import os
|
|
||||||
import pytest
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import controllerconfig.common.exceptions as exceptions
|
|
||||||
from controllerconfig import validate
|
|
||||||
from controllerconfig import DEFAULT_CONFIG
|
|
||||||
|
|
||||||
sys.modules['fm_core'] = mock.Mock()
|
|
||||||
|
|
||||||
import controllerconfig.systemconfig as cr # noqa: E402
|
|
||||||
|
|
||||||
|
|
||||||
def _dump_config(config):
    """Print every section and option of *config* in INI-like form.

    Intended purely as a debugging aid for failed test runs; the output
    goes to stdout where pytest captures it.
    """
    for section_name in config.sections():
        print("[%s]" % section_name)
        for option, option_value in config.items(section_name):
            print("%s=%s" % (option, option_value))
def _test_system_config(filename):
    """Parse *filename*, echo its contents, and run both validation paths.

    Raises if the system_config file fails to parse or to validate.
    """
    parsed = cr.parse_system_config(filename)

    # Echo the parsed sections so a failure is easy to diagnose.
    print("Parsed system_config:\n")
    _dump_config(parsed)

    # First validation path: generate the cgcs config in validate-only mode.
    cr.create_cgcs_config_file(None, parsed, None, None, None, 0,
                               validate_only=True)

    # Second validation path: onboard validation. Onboard is used because the
    # validator's reference version number is only set at build-time when
    # validating offboard.
    validate(parsed, DEFAULT_CONFIG, None, False)
def test_system_config_simple():
    """Verify that a minimal system_config file imports and validates."""
    # Locate the sample configuration shipped with the test suite.
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.simple"))
def test_system_config_ipv6():
    """Verify that a system_config file with an IPv6 OAM network imports."""
    # Locate the sample configuration shipped with the test suite.
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.ipv6"))
def test_system_config_lag_vlan():
    """Verify that a system_config file using LAG and VLAN imports."""
    # Locate the sample configuration shipped with the test suite.
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.lag.vlan"))
def test_system_config_security():
    """Verify that a system_config file with security config imports."""
    # Locate the sample configuration shipped with the test suite.
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.security"))
def test_system_config_ceph():
    """Verify that a system_config file with ceph config imports."""
    # Locate the sample configuration shipped with the test suite.
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.ceph"))
def test_system_config_simplex():
    """Verify that an AIO-simplex system_config file imports."""
    # Locate the sample configuration shipped with the test suite.
    files_dir = os.path.join(os.getcwd(), "controllerconfig/tests/files/")
    _test_system_config(os.path.join(files_dir, "system_config.simplex"))
def test_system_config_simplex_mgmt():
    """Verify an AIO-simplex system_config with management configuration.

    The sample file must import cleanly; a set of illegal MGMT_NETWORK
    edits must each be rejected by both validation paths.
    """
    systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "system_config.simplex_mgmt")

    # The unmodified file must import and validate cleanly.
    _test_system_config(systemfile)

    # Each of these MGMT_NETWORK settings must be rejected:
    #   GATEWAY / LOGICAL_INTERFACE  - parameters not allowed for simplex mgmt
    #   CIDR 10.10.10.0/24           - overlaps the OAM network
    #   CIDR FD01::0000/64           - IPv6 management is not supported here
    #   CIDR 192.168.42.0/29         - subnet too small for required addresses
    bad_settings = [
        ('GATEWAY', '192.168.42.1'),
        ('LOGICAL_INTERFACE', 'LOGICAL_INTERFACE_1'),
        ('CIDR', '10.10.10.0/24'),
        ('CIDR', 'FD01::0000/64'),
        ('CIDR', '192.168.42.0/29'),
    ]
    for option, value in bad_settings:
        system_config = cr.parse_system_config(systemfile)
        system_config.set('MGMT_NETWORK', option, value)
        with pytest.raises(exceptions.ConfigFail):
            cr.create_cgcs_config_file(None, system_config, None, None, None,
                                       0, validate_only=True)
        with pytest.raises(exceptions.ConfigFail):
            validate(system_config, DEFAULT_CONFIG, None, False)
def _assert_system_config_fail(system_config):
    """Assert that *system_config* is rejected (ConfigFail) by both the
    cgcs-config-file generation path and onboard validation."""
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(exceptions.ConfigFail):
        validate(system_config, DEFAULT_CONFIG, None, False)


def test_system_config_validation():
    """ Test detection of various errors in system_config file """

    # Create the path to the system_config files
    simple_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.simple")
    ipv6_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.ipv6")
    lag_vlan_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.lag.vlan")
    ceph_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.ceph")
    static_addr_systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/",
        "system_config.static_addr")

    # Test floating address outside of OAM_NETWORK CIDR
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '5555::5')
    _assert_system_config_fail(system_config)

    # Test non-ipv6 unit address on an IPv6 OAM network
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3')
    _assert_system_config_fail(system_config)

    # Test missing pxeboot network when using IPv6 management network
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.remove_section('PXEBOOT_NETWORK')
    _assert_system_config_fail(system_config)

    # Test ridiculously sized management network (must still be accepted)
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '1234::b:0:0:0')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS',
                      '1234::b:ffff:ffff:ffff')
    system_config.remove_option('MGMT_NETWORK', 'IP_FLOATING_ADDRESS')
    system_config.remove_option('MGMT_NETWORK', 'IP_UNIT_0_ADDRESS')
    system_config.remove_option('MGMT_NETWORK', 'IP_UNIT_1_ADDRESS')
    cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                               validate_only=True)
    validate(system_config, DEFAULT_CONFIG, None, False)

    # Test using OAM start/end addresses instead of explicit addresses
    # (must be accepted)
    system_config = cr.parse_system_config(ipv6_systemfile)
    system_config.set('OAM_NETWORK', 'IP_START_ADDRESS', 'abcd::2')
    system_config.set('OAM_NETWORK', 'IP_END_ADDRESS', 'abcd::4')
    system_config.remove_option('OAM_NETWORK', 'IP_FLOATING_ADDRESS')
    system_config.remove_option('OAM_NETWORK', 'IP_UNIT_0_ADDRESS')
    system_config.remove_option('OAM_NETWORK', 'IP_UNIT_1_ADDRESS')
    cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                               validate_only=True)
    validate(system_config, DEFAULT_CONFIG, None, False)

    # Test detection of an invalid PXEBOOT_CIDR (IPv6 not allowed)
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      'FD00::0000/64')
    _assert_system_config_fail(system_config)

    # Test detection of a PXEBOOT_CIDR that is too small
    system_config.set('PXEBOOT_NETWORK', 'PXEBOOT_CIDR',
                      '192.168.1.0/29')
    _assert_system_config_fail(system_config)

    # A missing PXEBOOT_CIDR surfaces as NoOptionError, not ConfigFail
    system_config.remove_option('PXEBOOT_NETWORK', 'PXEBOOT_CIDR')
    with pytest.raises(configparser.NoOptionError):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)
    with pytest.raises(configparser.NoOptionError):
        validate(system_config, DEFAULT_CONFIG, None, False)

    # Test overlap of MGMT_NETWORK CIDR
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('MGMT_NETWORK', 'CIDR', '192.168.203.0/26')
    _assert_system_config_fail(system_config)

    # Test invalid MGMT_NETWORK LAG_MODE
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('LOGICAL_INTERFACE_1', 'LAG_MODE', '2')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK VLAN not allowed
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'VLAN', '123')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK VLAN missing
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('MGMT_NETWORK', 'VLAN')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK start address specified without end address
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK end address specified without start address
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.200')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK start and end range does not have enough addresses
    system_config = cr.parse_system_config(static_addr_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.8')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK start address not in subnet
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.200.2')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.204.254')
    _assert_system_config_fail(system_config)

    # Test MGMT_NETWORK end address not in subnet
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
    system_config.set('MGMT_NETWORK', 'IP_END_ADDRESS', '192.168.214.254')
    _assert_system_config_fail(system_config)

    # Test overlap of CLUSTER_NETWORK CIDR with pxeboot and mgmt networks
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('CLUSTER_NETWORK', 'CIDR', '192.168.203.0/26')
    _assert_system_config_fail(system_config)

    system_config.set('CLUSTER_NETWORK', 'CIDR', '192.168.204.0/26')
    _assert_system_config_fail(system_config)

    # Test invalid CLUSTER_NETWORK LAG_MODE
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.add_section('LOGICAL_INTERFACE_2')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    system_config.set('CLUSTER_NETWORK', 'LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    _assert_system_config_fail(system_config)

    # Test CLUSTER_NETWORK VLAN overlap
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('CLUSTER_NETWORK', 'VLAN', '123')
    _assert_system_config_fail(system_config)

    # Test overlap of OAM_NETWORK CIDR with other networks
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('OAM_NETWORK', 'CIDR', '192.168.203.0/26')
    _assert_system_config_fail(system_config)

    system_config.set('OAM_NETWORK', 'CIDR', '192.168.204.0/26')
    _assert_system_config_fail(system_config)

    system_config.set('OAM_NETWORK', 'CIDR', '192.168.205.0/26')
    _assert_system_config_fail(system_config)

    # Test invalid OAM_NETWORK LAG_MODE
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.add_section('LOGICAL_INTERFACE_2')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_INTERFACE', 'Y')
    system_config.set('LOGICAL_INTERFACE_2', 'LAG_MODE', '3')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_MTU', '1500')
    system_config.set('LOGICAL_INTERFACE_2', 'INTERFACE_PORTS', 'eth3,eth4')
    system_config.set('OAM_NETWORK', 'LOGICAL_INTERFACE',
                      'LOGICAL_INTERFACE_2')
    _assert_system_config_fail(system_config)

    # Test OAM_NETWORK VLAN overlap
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('OAM_NETWORK', 'VLAN', '123')
    _assert_system_config_fail(system_config)

    system_config.set('OAM_NETWORK', 'VLAN', '126')
    _assert_system_config_fail(system_config)

    # Test OAM_NETWORK VLAN missing
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('OAM_NETWORK', 'VLAN')
    _assert_system_config_fail(system_config)

    # Test missing gateway
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.remove_option('MGMT_NETWORK', 'GATEWAY')
    _assert_system_config_fail(system_config)

    # Test two gateways
    system_config = cr.parse_system_config(lag_vlan_systemfile)
    system_config.set('OAM_NETWORK', 'GATEWAY', '10.10.10.1')
    _assert_system_config_fail(system_config)

    # Test detection of unsupported NTP NTP_SERVER
    # (only the config-file generation path performs this check)
    system_config = cr.parse_system_config(simple_systemfile)
    system_config.add_section('NTP')
    system_config.set('NTP', 'NTP_SERVER_1', '0.pool.ntp.org')
    with pytest.raises(exceptions.ConfigFail):
        cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
                                   validate_only=True)

    # Test detection of overspecification of MGMT network addresses.
    # BUGFIX: the option names were misspelled 'IP_IP_UNIT_*_ADDRESS', which
    # merely set bogus options instead of creating the intended
    # overspecification (floating + unit addresses together).
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('MGMT_NETWORK', 'IP_FLOATING_ADDRESS', '192.168.204.3')
    system_config.set('MGMT_NETWORK', 'IP_UNIT_0_ADDRESS', '192.168.204.6')
    system_config.set('MGMT_NETWORK', 'IP_UNIT_1_ADDRESS', '192.168.204.9')
    _assert_system_config_fail(system_config)

    # Test detection of overspecification of OAM network addresses.
    # BUGFIX: this case previously modified MGMT_NETWORK (copy-paste error),
    # so the OAM overspecification path was never exercised.
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('OAM_NETWORK', 'IP_FLOATING_ADDRESS', '10.10.10.2')
    system_config.set('OAM_NETWORK', 'IP_UNIT_0_ADDRESS', '10.10.10.3')
    system_config.set('OAM_NETWORK', 'IP_UNIT_1_ADDRESS', '10.10.10.4')
    _assert_system_config_fail(system_config)

    # Test detection of invalid release version
    system_config = cr.parse_system_config(ceph_systemfile)
    system_config.set('VERSION', 'RELEASE', '15.12')
    _assert_system_config_fail(system_config)
def test_pxeboot_range():
    """Verify validation of the PXEBoot network address range."""
    systemfile = os.path.join(
        os.getcwd(), "controllerconfig/tests/files/", "system_config.pxeboot")

    # The unmodified pxeboot config must import and validate cleanly.
    _test_system_config(systemfile)

    # Each of these PXEBOOT_NETWORK edits must be rejected by validate():
    # an invalid start address, an invalid end address, an end address that
    # is smaller than the start address, and a range with fewer than the
    # minimum required number of addresses (8).
    bad_ranges = [
        ('IP_START_ADDRESS', '8.123.122.345'),
        ('IP_END_ADDRESS', '128.123.122.345'),
        ('IP_END_ADDRESS', '192.168.102.30'),
        ('IP_END_ADDRESS', '128.123.122.34'),
    ]
    for option, value in bad_ranges:
        system_config = cr.parse_system_config(systemfile)
        system_config.set('PXEBOOT_NETWORK', option, value)
        with pytest.raises(exceptions.ConfigFail):
            validate(system_config, DEFAULT_CONFIG, None, False)
def test_kubernetes():
|
|
||||||
""" Test import of system_config file for kubernetes """
|
|
||||||
|
|
||||||
# Create the path to the system_config file
|
|
||||||
systemfile = os.path.join(
|
|
||||||
os.getcwd(), "controllerconfig/tests/files/",
|
|
||||||
"system_config.kubernetes")
|
|
||||||
|
|
||||||
# Test import and generation of answer file
|
|
||||||
_test_system_config(systemfile)
|
|
||||||
|
|
||||||
# Test CLUSTER_NETWORK start address specified without end address
|
|
||||||
system_config = cr.parse_system_config(systemfile)
|
|
||||||
system_config.set('CLUSTER_NETWORK', 'IP_START_ADDRESS', '192.168.204.2')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test CLUSTER_NETWORK end address specified without start address
|
|
||||||
system_config = cr.parse_system_config(systemfile)
|
|
||||||
system_config.set('CLUSTER_NETWORK', 'IP_END_ADDRESS', '192.168.204.200')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test detection of overspecification of CLUSTER network addresses
|
|
||||||
system_config = cr.parse_system_config(systemfile)
|
|
||||||
system_config.set('CLUSTER_NETWORK', 'IP_FLOATING_ADDRESS',
|
|
||||||
'192.168.206.103')
|
|
||||||
system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_0_ADDRESS',
|
|
||||||
'192.168.206.106')
|
|
||||||
system_config.set('CLUSTER_NETWORK', 'IP_IP_UNIT_1_ADDRESS',
|
|
||||||
'192.168.206.109')
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
|
||||||
validate_only=True)
|
|
||||||
with pytest.raises(exceptions.ConfigFail):
|
|
||||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test absence of optional DNS configuration
|
|
||||||
system_config = cr.parse_system_config(systemfile)
|
|
||||||
system_config.remove_section('DNS')
|
|
||||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
|
||||||
validate_only=True)
|
|
||||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test absence of optional docker proxy configuration
|
|
||||||
system_config = cr.parse_system_config(systemfile)
|
|
||||||
system_config.remove_section('DOCKER_PROXY')
|
|
||||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
|
||||||
validate_only=True)
|
|
||||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
|
||||||
|
|
||||||
# Test absence of optional docker registry configuration
|
|
||||||
system_config = cr.parse_system_config(systemfile)
|
|
||||||
system_config.remove_section('DOCKER_REGISTRY')
|
|
||||||
cr.create_cgcs_config_file(None, system_config, None, None, None, 0,
|
|
||||||
validate_only=True)
|
|
||||||
validate(system_config, DEFAULT_CONFIG, None, False)
|
|
|
@ -19,11 +19,12 @@ from cinderclient.v3 import client as cinder_client_v3
|
||||||
from glanceclient import Client
|
from glanceclient import Client
|
||||||
|
|
||||||
from cinderclient import utils as c_utils
|
from cinderclient import utils as c_utils
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common.rest_api_utils import get_token
|
from controllerconfig.common.rest_api_utils import get_token
|
||||||
from controllerconfig.common.exceptions import TidyStorageFail
|
from controllerconfig.common.exceptions import TidyStorageFail
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
|
KEYSTONE_AUTH_SERVER_RETRY_CNT = 60
|
||||||
KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry
|
KEYSTONE_AUTH_SERVER_WAIT = 1 # 1sec wait per retry
|
||||||
|
@ -565,8 +566,6 @@ def main():
|
||||||
show_help()
|
show_help()
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
log.configure()
|
|
||||||
|
|
||||||
result_file = sys.argv[1]
|
result_file = sys.argv[1]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2016-2019 Wind River Systems, Inc.
|
# Copyright (c) 2016-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
@ -42,13 +42,12 @@ from tsconfig.tsconfig import CONTROLLER_UPGRADE_STARTED_FLAG
|
||||||
from tsconfig.tsconfig import RESTORE_IN_PROGRESS_FLAG
|
from tsconfig.tsconfig import RESTORE_IN_PROGRESS_FLAG
|
||||||
|
|
||||||
from controllerconfig.common import constants
|
from controllerconfig.common import constants
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig import utils as cutils
|
from controllerconfig import utils as cutils
|
||||||
from controllerconfig import backup_restore
|
|
||||||
|
|
||||||
from controllerconfig.upgrades import utils
|
from controllerconfig.upgrades import utils
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
POSTGRES_MOUNT_PATH = '/mnt/postgresql'
|
POSTGRES_MOUNT_PATH = '/mnt/postgresql'
|
||||||
POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump'
|
POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump'
|
||||||
|
@ -865,8 +864,6 @@ def main():
|
||||||
exit(1)
|
exit(1)
|
||||||
arg += 1
|
arg += 1
|
||||||
|
|
||||||
log.configure()
|
|
||||||
|
|
||||||
if not from_release or not to_release:
|
if not from_release or not to_release:
|
||||||
print("Both the FROM_RELEASE and TO_RELEASE must be specified")
|
print("Both the FROM_RELEASE and TO_RELEASE must be specified")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
@ -955,9 +952,10 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release):
|
||||||
extract_relative_directory(archive, 'config/ssh_config',
|
extract_relative_directory(archive, 'config/ssh_config',
|
||||||
tmp_config_path + '/ssh_config')
|
tmp_config_path + '/ssh_config')
|
||||||
|
|
||||||
|
# TODO: Switch this over to use Ansible
|
||||||
# Restore certificate files if they are in the archive
|
# Restore certificate files if they are in the archive
|
||||||
backup_restore.restore_etc_ssl_dir(archive,
|
# backup_restore.restore_etc_ssl_dir(archive,
|
||||||
configpath=tmp_config_path)
|
# configpath=tmp_config_path)
|
||||||
|
|
||||||
# Extract etc files
|
# Extract etc files
|
||||||
archive.extract('etc/hostname', '/')
|
archive.extract('etc/hostname', '/')
|
||||||
|
@ -975,11 +973,12 @@ def extract_data_from_archive(archive, staging_dir, from_release, to_release):
|
||||||
path = 'config/' + file
|
path = 'config/' + file
|
||||||
extract_relative_file(archive, path, tmp_config_path)
|
extract_relative_file(archive, path, tmp_config_path)
|
||||||
|
|
||||||
|
# TODO: Switch this over to use Ansible
|
||||||
# Extract distributed cloud addn_hosts file if present in archive.
|
# Extract distributed cloud addn_hosts file if present in archive.
|
||||||
if backup_restore.file_exists_in_archive(
|
# if backup_restore.file_exists_in_archive(
|
||||||
archive, 'config/dnsmasq.addn_hosts_dc'):
|
# archive, 'config/dnsmasq.addn_hosts_dc'):
|
||||||
extract_relative_file(
|
# extract_relative_file(
|
||||||
archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path)
|
# archive, 'config/dnsmasq.addn_hosts_dc', tmp_config_path)
|
||||||
|
|
||||||
|
|
||||||
def extract_postgres_data(archive):
|
def extract_postgres_data(archive):
|
||||||
|
@ -1114,7 +1113,8 @@ def upgrade_controller_simplex(backup_file):
|
||||||
to_release = metadata['upgrade']['to_release']
|
to_release = metadata['upgrade']['to_release']
|
||||||
|
|
||||||
check_load_version(to_release)
|
check_load_version(to_release)
|
||||||
backup_restore.check_load_subfunctions(archive, staging_dir)
|
# TODO: Switch this over to use Ansible
|
||||||
|
# backup_restore.check_load_subfunctions(archive, staging_dir)
|
||||||
|
|
||||||
# Patching is potentially a multi-phase step.
|
# Patching is potentially a multi-phase step.
|
||||||
# If the controller is impacted by patches from the backup,
|
# If the controller is impacted by patches from the backup,
|
||||||
|
@ -1271,7 +1271,8 @@ def upgrade_controller_simplex(backup_file):
|
||||||
LOG.info("Generating manifests for %s" %
|
LOG.info("Generating manifests for %s" %
|
||||||
sysinv_constants.CONTROLLER_0_HOSTNAME)
|
sysinv_constants.CONTROLLER_0_HOSTNAME)
|
||||||
|
|
||||||
backup_restore.configure_loopback_interface(archive)
|
# TODO: Switch this over to use Ansible
|
||||||
|
# backup_restore.configure_loopback_interface(archive)
|
||||||
|
|
||||||
print_log_info("Creating configs...")
|
print_log_info("Creating configs...")
|
||||||
cutils.create_system_config()
|
cutils.create_system_config()
|
||||||
|
@ -1301,10 +1302,10 @@ def upgrade_controller_simplex(backup_file):
|
||||||
|
|
||||||
cutils.apply_banner_customization()
|
cutils.apply_banner_customization()
|
||||||
|
|
||||||
backup_restore.restore_ldap(archive, backup_restore.ldap_permdir,
|
# TODO: Switch this over to use Ansible
|
||||||
staging_dir)
|
# backup_restore.restore_ldap(archive, backup_restore.ldap_permdir,
|
||||||
|
# staging_dir)
|
||||||
backup_restore.restore_std_dir(archive, backup_restore.home_permdir)
|
# backup_restore.restore_std_dir(archive, backup_restore.home_permdir)
|
||||||
|
|
||||||
archive.close()
|
archive.close()
|
||||||
shutil.rmtree(staging_dir, ignore_errors=True)
|
shutil.rmtree(staging_dir, ignore_errors=True)
|
||||||
|
@ -1352,8 +1353,6 @@ def simplex_main():
|
||||||
exit(1)
|
exit(1)
|
||||||
arg += 1
|
arg += 1
|
||||||
|
|
||||||
log.configure()
|
|
||||||
|
|
||||||
# Enforce that the command is being run from the console
|
# Enforce that the command is being run from the console
|
||||||
if cutils.is_ssh_parent():
|
if cutils.is_ssh_parent():
|
||||||
print (
|
print (
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2015-2019 Wind River Systems, Inc.
|
# Copyright (c) 2015-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
@ -15,13 +15,13 @@ import subprocess
|
||||||
|
|
||||||
import tsconfig.tsconfig as tsc
|
import tsconfig.tsconfig as tsc
|
||||||
|
|
||||||
from controllerconfig import backup_restore
|
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common import constants
|
from controllerconfig.common import constants
|
||||||
from sysinv.common import constants as sysinv_constants
|
from sysinv.common import constants as sysinv_constants
|
||||||
from controllerconfig.upgrades import utils
|
from controllerconfig.upgrades import utils
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
from oslo_log import log
|
||||||
|
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def get_upgrade_databases(shared_services):
|
def get_upgrade_databases(shared_services):
|
||||||
|
@ -197,8 +197,9 @@ def create_simplex_backup(software_upgrade):
|
||||||
with open(metadata_filename, 'w') as metadata_file:
|
with open(metadata_filename, 'w') as metadata_file:
|
||||||
metadata_file.write(json_data)
|
metadata_file.write(json_data)
|
||||||
|
|
||||||
backup_filename = get_upgrade_backup_filename(software_upgrade)
|
# TODO: Switch this over to use Ansible
|
||||||
backup_restore.backup(backup_filename, constants.BACKUPS_PATH)
|
# backup_filename = get_upgrade_backup_filename(software_upgrade)
|
||||||
|
# backup_restore.backup(backup_filename, constants.BACKUPS_PATH)
|
||||||
LOG.info("Create simplex backup complete")
|
LOG.info("Create simplex backup complete")
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2016-2019 Wind River Systems, Inc.
|
# Copyright (c) 2016-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
@ -21,14 +21,13 @@ import yaml
|
||||||
from tsconfig.tsconfig import SW_VERSION
|
from tsconfig.tsconfig import SW_VERSION
|
||||||
from tsconfig.tsconfig import PLATFORM_PATH
|
from tsconfig.tsconfig import PLATFORM_PATH
|
||||||
|
|
||||||
from controllerconfig import DEFAULT_DOMAIN_NAME
|
|
||||||
from controllerconfig import utils as cutils
|
from controllerconfig import utils as cutils
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common import constants
|
from controllerconfig.common import constants
|
||||||
from sysinv.common import constants as sysinv_constants
|
from sysinv.common import constants as sysinv_constants
|
||||||
|
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
POSTGRES_PATH = '/var/lib/postgresql'
|
POSTGRES_PATH = '/var/lib/postgresql'
|
||||||
POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION)
|
POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION)
|
||||||
|
@ -36,6 +35,9 @@ RABBIT_PATH = '/var/lib/rabbitmq'
|
||||||
CONTROLLER_1_HOSTNAME = "controller-1"
|
CONTROLLER_1_HOSTNAME = "controller-1"
|
||||||
DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n"
|
DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n"
|
||||||
|
|
||||||
|
# well-known default domain name
|
||||||
|
DEFAULT_DOMAIN_NAME = 'Default'
|
||||||
|
|
||||||
# Migration script actions
|
# Migration script actions
|
||||||
ACTION_START = "start"
|
ACTION_START = "start"
|
||||||
ACTION_MIGRATE = "migrate"
|
ACTION_MIGRATE = "migrate"
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2014-2019 Wind River Systems, Inc.
|
# Copyright (c) 2014-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
@ -8,151 +8,27 @@
|
||||||
Utilities
|
Utilities
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import collections
|
|
||||||
import errno
|
|
||||||
import glob
|
import glob
|
||||||
import os
|
import os
|
||||||
import shutil
|
import shutil
|
||||||
import socket
|
|
||||||
import subprocess
|
import subprocess
|
||||||
import time
|
import time
|
||||||
import sys
|
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
from six.moves import configparser
|
|
||||||
import re
|
import re
|
||||||
import six
|
import six
|
||||||
|
|
||||||
import netaddr
|
import netaddr
|
||||||
from tsconfig import tsconfig
|
from tsconfig import tsconfig
|
||||||
from sysinv.common import constants as sysinv_constants
|
|
||||||
|
|
||||||
from controllerconfig.common import constants
|
from controllerconfig.common import constants
|
||||||
from controllerconfig.common import log
|
|
||||||
from controllerconfig.common.exceptions import ValidateFail
|
from controllerconfig.common.exceptions import ValidateFail
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
LOOPBACK_IFNAME = 'lo'
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
NETWORK_SCRIPTS_PATH = '/etc/sysconfig/network-scripts'
|
|
||||||
NETWORK_SCRIPTS_PREFIX = 'ifcfg'
|
|
||||||
NETWORK_SCRIPTS_LOOPBACK = '%s-%s' % (NETWORK_SCRIPTS_PREFIX, LOOPBACK_IFNAME)
|
|
||||||
|
|
||||||
BOND_MIIMON_DEFAULT = 100
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
|
||||||
|
|
||||||
DEVNULL = open(os.devnull, 'w')
|
DEVNULL = open(os.devnull, 'w')
|
||||||
|
|
||||||
EXPECTED_SERVICE_NAME_AND_TYPE = (
|
|
||||||
{"KEYSTONE_SERVICE_NAME": "keystone",
|
|
||||||
"KEYSTONE_SERVICE_TYPE": "identity",
|
|
||||||
"SYSINV_SERVICE_NAME": "sysinv",
|
|
||||||
"SYSINV_SERVICE_TYPE": "platform",
|
|
||||||
"PATCHING_SERVICE_NAME": "patching",
|
|
||||||
"PATCHING_SERVICE_TYPE": "patching",
|
|
||||||
"NFV_SERVICE_NAME": "vim",
|
|
||||||
"NFV_SERVICE_TYPE": "nfv",
|
|
||||||
"FM_SERVICE_NAME": "fm",
|
|
||||||
"FM_SERVICE_TYPE": "faultmanagement",
|
|
||||||
"BARBICAN_SERVICE_NAME": "barbican",
|
|
||||||
"BARBICAN_SERVICE_TYPE": "key-manager",
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
def filesystem_get_free_space(path):
|
|
||||||
""" Get Free space of directory """
|
|
||||||
statvfs = os.statvfs(path)
|
|
||||||
return (statvfs.f_frsize * statvfs.f_bavail)
|
|
||||||
|
|
||||||
|
|
||||||
def directory_get_size(start_dir, regex=None):
|
|
||||||
"""
|
|
||||||
Get total size of a directory tree in bytes
|
|
||||||
:param start_dir: top of tree
|
|
||||||
:param regex: only include files matching this regex (if provided)
|
|
||||||
:return: size in bytes
|
|
||||||
"""
|
|
||||||
total_size = 0
|
|
||||||
for dirpath, _, filenames in os.walk(start_dir):
|
|
||||||
for filename in filenames:
|
|
||||||
if regex is None or regex.match(filename):
|
|
||||||
filep = os.path.join(dirpath, filename)
|
|
||||||
try:
|
|
||||||
total_size += os.path.getsize(filep)
|
|
||||||
except OSError as e:
|
|
||||||
if e.errno != errno.ENOENT:
|
|
||||||
raise e
|
|
||||||
return total_size
|
|
||||||
|
|
||||||
|
|
||||||
def print_bytes(sizeof):
|
|
||||||
""" Pretty print bytes """
|
|
||||||
for size in ['Bytes', 'KB', 'MB', 'GB', 'TB']:
|
|
||||||
if abs(sizeof) < 1024.0:
|
|
||||||
return "%3.1f %s" % (sizeof, size)
|
|
||||||
sizeof /= 1024.0
|
|
||||||
|
|
||||||
|
|
||||||
def modprobe_drbd():
|
|
||||||
"""Load DRBD module"""
|
|
||||||
try:
|
|
||||||
mod_parms = subprocess.check_output(['drbdadm', 'sh-mod-parms'],
|
|
||||||
close_fds=True).rstrip()
|
|
||||||
subprocess.call(["modprobe", "-s", "drbd", mod_parms], stdout=DEVNULL)
|
|
||||||
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to load drbd module")
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def drbd_start(resource):
|
|
||||||
"""Start drbd resource"""
|
|
||||||
try:
|
|
||||||
subprocess.check_call(["drbdadm", "up", resource],
|
|
||||||
stdout=DEVNULL)
|
|
||||||
|
|
||||||
subprocess.check_call(["drbdadm", "primary", resource],
|
|
||||||
stdout=DEVNULL)
|
|
||||||
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to start drbd %s" % resource)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def drbd_stop(resource):
|
|
||||||
"""Stop drbd resource"""
|
|
||||||
try:
|
|
||||||
subprocess.check_call(["drbdadm", "secondary", resource],
|
|
||||||
stdout=DEVNULL)
|
|
||||||
# Allow time for demotion to be processed
|
|
||||||
time.sleep(1)
|
|
||||||
subprocess.check_call(["drbdadm", "down", resource], stdout=DEVNULL)
|
|
||||||
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to stop drbd %s" % resource)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def mount(device, directory):
|
|
||||||
"""Mount a directory"""
|
|
||||||
try:
|
|
||||||
subprocess.check_call(["mount", device, directory], stdout=DEVNULL)
|
|
||||||
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to mount %s filesystem" % directory)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def umount(directory):
|
|
||||||
"""Unmount a directory"""
|
|
||||||
try:
|
|
||||||
subprocess.check_call(["umount", directory], stdout=DEVNULL)
|
|
||||||
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to umount %s filesystem" % directory)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def start_service(name):
|
def start_service(name):
|
||||||
""" Start a systemd service """
|
""" Start a systemd service """
|
||||||
|
@ -181,48 +57,6 @@ def restart_service(name):
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
||||||
def start_lsb_service(name):
|
|
||||||
""" Start a Linux Standard Base service """
|
|
||||||
try:
|
|
||||||
script = os.path.join("/etc/init.d", name)
|
|
||||||
# Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
|
|
||||||
subprocess.check_call([script, "start"],
|
|
||||||
env=dict(os.environ,
|
|
||||||
**{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
|
|
||||||
stdout=DEVNULL)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to start %s service" % name)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def stop_lsb_service(name):
|
|
||||||
""" Stop a Linux Standard Base service """
|
|
||||||
try:
|
|
||||||
script = os.path.join("/etc/init.d", name)
|
|
||||||
# Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
|
|
||||||
subprocess.check_call([script, "stop"],
|
|
||||||
env=dict(os.environ,
|
|
||||||
**{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
|
|
||||||
stdout=DEVNULL)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to stop %s service" % name)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def restart_lsb_service(name):
|
|
||||||
""" Restart a Linux Standard Base service """
|
|
||||||
try:
|
|
||||||
script = os.path.join("/etc/init.d", name)
|
|
||||||
# Call the script with SYSTEMCTL_SKIP_REDIRECT=1 in the environment
|
|
||||||
subprocess.check_call([script, "restart"],
|
|
||||||
env=dict(os.environ,
|
|
||||||
**{"SYSTEMCTL_SKIP_REDIRECT": "1"}),
|
|
||||||
stdout=DEVNULL)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
LOG.error("Failed to restart %s service" % name)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def check_sm_service(service, state):
|
def check_sm_service(service, state):
|
||||||
""" Check whether an SM service has the supplied state """
|
""" Check whether an SM service has the supplied state """
|
||||||
try:
|
try:
|
||||||
|
@ -245,34 +79,6 @@ def wait_sm_service(service, timeout=180):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def is_active(service):
|
|
||||||
""" Check whether an SM service is active """
|
|
||||||
return check_sm_service(service, 'enabled-active')
|
|
||||||
|
|
||||||
|
|
||||||
def get_controller_hostname():
|
|
||||||
"""
|
|
||||||
Get the hostname for this controller
|
|
||||||
:return: controller hostname
|
|
||||||
"""
|
|
||||||
return socket.gethostname()
|
|
||||||
|
|
||||||
|
|
||||||
def get_mate_controller_hostname():
|
|
||||||
"""
|
|
||||||
Get the hostname for the mate controller
|
|
||||||
:return: mate controller hostname
|
|
||||||
"""
|
|
||||||
my_hostname = socket.gethostname()
|
|
||||||
if my_hostname.endswith('-0'):
|
|
||||||
postfix = '-1'
|
|
||||||
elif my_hostname.endswith('-1'):
|
|
||||||
postfix = '-0'
|
|
||||||
else:
|
|
||||||
raise Exception("Invalid controller hostname")
|
|
||||||
return my_hostname.rsplit('-', 1)[0] + postfix
|
|
||||||
|
|
||||||
|
|
||||||
def get_address_from_hosts_file(hostname):
|
def get_address_from_hosts_file(hostname):
|
||||||
"""
|
"""
|
||||||
Get the IP address of a host from the /etc/hosts file
|
Get the IP address of a host from the /etc/hosts file
|
||||||
|
@ -286,303 +92,6 @@ def get_address_from_hosts_file(hostname):
|
||||||
raise Exception("Hostname %s not found in /etc/hosts" % hostname)
|
raise Exception("Hostname %s not found in /etc/hosts" % hostname)
|
||||||
|
|
||||||
|
|
||||||
def validate_and_normalize_mac(address):
|
|
||||||
"""Validate a MAC address and return normalized form.
|
|
||||||
|
|
||||||
Checks whether the supplied MAC address is formally correct and
|
|
||||||
normalize it to all lower case.
|
|
||||||
|
|
||||||
:param address: MAC address to be validated and normalized.
|
|
||||||
:returns: Normalized and validated MAC address.
|
|
||||||
:raises: InvalidMAC If the MAC address is not valid.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not is_valid_mac(address):
|
|
||||||
raise Exception("InvalidMAC %s" % address)
|
|
||||||
return address.lower()
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_ip(address):
|
|
||||||
if not is_valid_ipv4(address):
|
|
||||||
return is_valid_ipv6(address)
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def lag_mode_to_str(lag_mode):
|
|
||||||
if lag_mode == 0:
|
|
||||||
return "balance-rr"
|
|
||||||
if lag_mode == 1:
|
|
||||||
return "active-backup"
|
|
||||||
elif lag_mode == 2:
|
|
||||||
return "balance-xor"
|
|
||||||
elif lag_mode == 3:
|
|
||||||
return "broadcast"
|
|
||||||
elif lag_mode == 4:
|
|
||||||
return "802.3ad"
|
|
||||||
elif lag_mode == 5:
|
|
||||||
return "balance-tlb"
|
|
||||||
elif lag_mode == 6:
|
|
||||||
return "balance-alb"
|
|
||||||
else:
|
|
||||||
raise Exception(
|
|
||||||
"Invalid LAG_MODE value of %d. Valid values: 0-6" % lag_mode)
|
|
||||||
|
|
||||||
|
|
||||||
def is_combined_load():
|
|
||||||
return 'worker' in tsconfig.subfunctions
|
|
||||||
|
|
||||||
|
|
||||||
def get_system_type():
|
|
||||||
if is_combined_load():
|
|
||||||
return sysinv_constants.TIS_AIO_BUILD
|
|
||||||
return sysinv_constants.TIS_STD_BUILD
|
|
||||||
|
|
||||||
|
|
||||||
def get_security_profile():
|
|
||||||
eprofile = sysinv_constants.SYSTEM_SECURITY_PROFILE_EXTENDED
|
|
||||||
if tsconfig.security_profile == eprofile:
|
|
||||||
return eprofile
|
|
||||||
return sysinv_constants.SYSTEM_SECURITY_PROFILE_STANDARD
|
|
||||||
|
|
||||||
|
|
||||||
def is_cpe():
|
|
||||||
return get_system_type() == sysinv_constants.TIS_AIO_BUILD
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_common(device, mtu=None):
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters that is common to all
|
|
||||||
device types.
|
|
||||||
"""
|
|
||||||
parameters = collections.OrderedDict()
|
|
||||||
parameters['BOOTPROTO'] = 'none'
|
|
||||||
parameters['ONBOOT'] = 'yes'
|
|
||||||
parameters['DEVICE'] = device
|
|
||||||
# Increased to accommodate devices that require more time to
|
|
||||||
# complete link auto-negotiation
|
|
||||||
parameters['LINKDELAY'] = '20'
|
|
||||||
if mtu:
|
|
||||||
parameters['MTU'] = mtu
|
|
||||||
return parameters
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway):
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters for all IPv4 static
|
|
||||||
addressing.
|
|
||||||
"""
|
|
||||||
parameters = collections.OrderedDict()
|
|
||||||
parameters['IPADDR'] = ip_address
|
|
||||||
parameters['NETMASK'] = ip_subnet.netmask
|
|
||||||
parameters['BROADCAST'] = ip_subnet.broadcast
|
|
||||||
if ip_gateway:
|
|
||||||
parameters['GATEWAY'] = ip_gateway
|
|
||||||
return parameters
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway):
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters for all IPv6 static
|
|
||||||
addressing.
|
|
||||||
"""
|
|
||||||
parameters = collections.OrderedDict()
|
|
||||||
parameters['IPV6INIT'] = 'yes'
|
|
||||||
parameters['IPV6ADDR'] = netaddr.IPNetwork('%s/%u' % (ip_address,
|
|
||||||
ip_subnet.prefixlen))
|
|
||||||
if ip_gateway:
|
|
||||||
parameters['IPV6_DEFAULTGW'] = ip_gateway
|
|
||||||
return parameters
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_static(ip_address, ip_subnet, ip_gateway=None):
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters for all IP static
|
|
||||||
addressing.
|
|
||||||
"""
|
|
||||||
if netaddr.IPAddress(ip_address).version == 4:
|
|
||||||
return get_interface_config_ipv4(ip_address, ip_subnet, ip_gateway)
|
|
||||||
else:
|
|
||||||
return get_interface_config_ipv6(ip_address, ip_subnet, ip_gateway)
|
|
||||||
|
|
||||||
|
|
||||||
def write_interface_config_file(device, parameters):
|
|
||||||
"""
|
|
||||||
Write interface configuration parameters to the network scripts
|
|
||||||
directory named after the supplied device.
|
|
||||||
|
|
||||||
:param device device name as str
|
|
||||||
:param parameters dict of parameters
|
|
||||||
"""
|
|
||||||
filename = os.path.join(NETWORK_SCRIPTS_PATH, "%s-%s" %
|
|
||||||
(NETWORK_SCRIPTS_PREFIX, device))
|
|
||||||
try:
|
|
||||||
with open(filename, 'w') as f:
|
|
||||||
for parameter, value in parameters.items():
|
|
||||||
f.write("%s=%s\n" % (parameter, str(value)))
|
|
||||||
except IOError:
|
|
||||||
LOG.error("Failed to create file: %s" % filename)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def write_interface_config_ethernet(device, mtu=None, parameters=None):
|
|
||||||
"""Write the interface configuration for an Ethernet device."""
|
|
||||||
config = get_interface_config_common(device, mtu)
|
|
||||||
if parameters:
|
|
||||||
config.update(parameters)
|
|
||||||
write_interface_config_file(device, config)
|
|
||||||
|
|
||||||
|
|
||||||
def write_interface_config_vlan(device, mtu, parameters=None):
|
|
||||||
"""Write the interface configuration for a VLAN device."""
|
|
||||||
config = get_interface_config_vlan()
|
|
||||||
if parameters:
|
|
||||||
config.update(parameters)
|
|
||||||
write_interface_config_ethernet(device, mtu, parameters=config)
|
|
||||||
|
|
||||||
|
|
||||||
def write_interface_config_slave(device, master, parameters=None):
|
|
||||||
"""Write the interface configuration for a bond slave device."""
|
|
||||||
config = get_interface_config_slave(master)
|
|
||||||
if parameters:
|
|
||||||
config.update(parameters)
|
|
||||||
write_interface_config_ethernet(device, parameters=config)
|
|
||||||
|
|
||||||
|
|
||||||
def write_interface_config_bond(device, mtu, mode, txhash, miimon,
|
|
||||||
member1, member2, parameters=None):
|
|
||||||
"""Write the interface configuration for a bond master device."""
|
|
||||||
config = get_interface_config_bond(mode, txhash, miimon)
|
|
||||||
if parameters:
|
|
||||||
config.update(parameters)
|
|
||||||
write_interface_config_ethernet(device, mtu, parameters=config)
|
|
||||||
|
|
||||||
# create slave device configuration files
|
|
||||||
if member1:
|
|
||||||
write_interface_config_slave(member1, device)
|
|
||||||
if member2:
|
|
||||||
write_interface_config_slave(member2, device)
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_vlan():
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters for all IP static
|
|
||||||
addressing.
|
|
||||||
"""
|
|
||||||
parameters = collections.OrderedDict()
|
|
||||||
parameters['VLAN'] = 'yes'
|
|
||||||
return parameters
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_slave(master):
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters for bond interface
|
|
||||||
slave devices.
|
|
||||||
"""
|
|
||||||
parameters = collections.OrderedDict()
|
|
||||||
parameters['MASTER'] = master
|
|
||||||
parameters['SLAVE'] = 'yes'
|
|
||||||
parameters['PROMISC'] = 'yes'
|
|
||||||
return parameters
|
|
||||||
|
|
||||||
|
|
||||||
def get_interface_config_bond(mode, txhash, miimon):
|
|
||||||
"""
|
|
||||||
Return the interface configuration parameters for bond interface
|
|
||||||
master devices.
|
|
||||||
"""
|
|
||||||
options = "mode=%s miimon=%s" % (mode, miimon)
|
|
||||||
|
|
||||||
if txhash:
|
|
||||||
options += " xmit_hash_policy=%s" % txhash
|
|
||||||
|
|
||||||
if mode == constants.LAG_MODE_8023AD:
|
|
||||||
options += " lacp_rate=fast"
|
|
||||||
|
|
||||||
parameters = collections.OrderedDict()
|
|
||||||
parameters['BONDING_OPTS'] = "\"%s\"" % options
|
|
||||||
return parameters
|
|
||||||
|
|
||||||
|
|
||||||
def remove_interface_config_files(stdout=None, stderr=None):
|
|
||||||
"""
|
|
||||||
Remove all existing interface configuration files.
|
|
||||||
"""
|
|
||||||
files = glob.glob1(NETWORK_SCRIPTS_PATH, "%s-*" % NETWORK_SCRIPTS_PREFIX)
|
|
||||||
for file in [f for f in files if f != NETWORK_SCRIPTS_LOOPBACK]:
|
|
||||||
ifname = file[len(NETWORK_SCRIPTS_PREFIX) + 1:] # remove prefix
|
|
||||||
subprocess.check_call(["ifdown", ifname],
|
|
||||||
stdout=stdout, stderr=stderr)
|
|
||||||
os.remove(os.path.join(NETWORK_SCRIPTS_PATH, file))
|
|
||||||
|
|
||||||
|
|
||||||
def remove_interface_ip_address(device, ip_address, ip_subnet,
|
|
||||||
stdout=None, stderr=None):
|
|
||||||
"""Remove an IP address from an interface"""
|
|
||||||
subprocess.check_call(
|
|
||||||
["ip", "addr", "del",
|
|
||||||
str(ip_address) + "/" + str(ip_subnet.prefixlen),
|
|
||||||
"dev", device],
|
|
||||||
stdout=stdout, stderr=stderr)
|
|
||||||
|
|
||||||
|
|
||||||
def send_interface_garp(device, ip_address, stdout=None, stderr=None):
|
|
||||||
"""Send a GARP message for the supplied address"""
|
|
||||||
subprocess.call(
|
|
||||||
["arping", "-c", "3", "-A", "-q", "-I",
|
|
||||||
device, str(ip_address)],
|
|
||||||
stdout=stdout, stderr=stderr)
|
|
||||||
|
|
||||||
|
|
||||||
def restart_networking(stdout=None, stderr=None):
|
|
||||||
"""
|
|
||||||
Restart networking services.
|
|
||||||
"""
|
|
||||||
# Kill any leftover dhclient process from the boot
|
|
||||||
subprocess.call(["pkill", "dhclient"])
|
|
||||||
|
|
||||||
# remove any existing IP addresses
|
|
||||||
ifs = glob.glob1('/sys/class/net', "*")
|
|
||||||
for i in [i for i in ifs if i != LOOPBACK_IFNAME]:
|
|
||||||
subprocess.call(
|
|
||||||
["ip", "link", "set", "dev", i, "down"])
|
|
||||||
subprocess.call(
|
|
||||||
["ip", "addr", "flush", "dev", i])
|
|
||||||
subprocess.call(
|
|
||||||
["ip", "-6", "addr", "flush", "dev", i])
|
|
||||||
|
|
||||||
subprocess.check_call(["systemctl", "restart", "network"],
|
|
||||||
stdout=stdout, stderr=stderr)
|
|
||||||
|
|
||||||
|
|
||||||
def output_to_dict(output):
|
|
||||||
dict = {}
|
|
||||||
output = [_f for _f in output.split('\n') if _f]
|
|
||||||
|
|
||||||
for row in output:
|
|
||||||
values = row.split()
|
|
||||||
if len(values) != 2:
|
|
||||||
raise Exception("The following output does not respect the "
|
|
||||||
"format: %s" % row)
|
|
||||||
dict[values[1]] = values[0]
|
|
||||||
|
|
||||||
return dict
|
|
||||||
|
|
||||||
|
|
||||||
def get_install_uuid():
|
|
||||||
""" Get the install uuid from the feed directory. """
|
|
||||||
uuid_fname = None
|
|
||||||
try:
|
|
||||||
uuid_dir = '/www/pages/feed/rel-' + tsconfig.SW_VERSION
|
|
||||||
uuid_fname = os.path.join(uuid_dir, 'install_uuid')
|
|
||||||
with open(uuid_fname, 'r') as uuid_file:
|
|
||||||
install_uuid = uuid_file.readline().rstrip()
|
|
||||||
except IOError:
|
|
||||||
LOG.error("Failed to open file: %s", uuid_fname)
|
|
||||||
raise Exception("Failed to retrieve install UUID")
|
|
||||||
|
|
||||||
return install_uuid
|
|
||||||
|
|
||||||
|
|
||||||
def write_simplex_flag():
|
def write_simplex_flag():
|
||||||
""" Write simplex flag. """
|
""" Write simplex flag. """
|
||||||
simplex_flag = "/etc/platform/simplex"
|
simplex_flag = "/etc/platform/simplex"
|
||||||
|
@ -634,37 +143,6 @@ def apply_manifest(controller_address_0, personality, manifest, hieradata,
|
||||||
raise Exception(msg)
|
raise Exception(msg)
|
||||||
|
|
||||||
|
|
||||||
def create_system_controller_config(filename):
|
|
||||||
""" Create any additional parameters needed for system controller"""
|
|
||||||
# set keystone endpoint region name and sysinv keystone authtoken
|
|
||||||
# region name
|
|
||||||
config = {
|
|
||||||
'keystone::endpoint::region':
|
|
||||||
sysinv_constants.SYSTEM_CONTROLLER_REGION,
|
|
||||||
'sysinv::region_name':
|
|
||||||
sysinv_constants.SYSTEM_CONTROLLER_REGION,
|
|
||||||
}
|
|
||||||
try:
|
|
||||||
with open(filename, 'w') as f:
|
|
||||||
yaml.dump(config, f, default_flow_style=False)
|
|
||||||
except Exception:
|
|
||||||
LOG.exception("failed to write config file: %s" % filename)
|
|
||||||
raise
|
|
||||||
|
|
||||||
|
|
||||||
def create_static_config():
|
|
||||||
cmd = ["/usr/bin/sysinv-puppet",
|
|
||||||
"create-static-config",
|
|
||||||
constants.HIERADATA_WORKDIR]
|
|
||||||
try:
|
|
||||||
os.makedirs(constants.HIERADATA_WORKDIR)
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
msg = "Failed to create puppet hiera static config"
|
|
||||||
print(msg)
|
|
||||||
raise Exception(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def create_system_config():
|
def create_system_config():
|
||||||
cmd = ["/usr/bin/sysinv-puppet",
|
cmd = ["/usr/bin/sysinv-puppet",
|
||||||
"create-system-config",
|
"create-system-config",
|
||||||
|
@ -692,34 +170,6 @@ def create_host_config(hostname=None):
|
||||||
raise Exception(msg)
|
raise Exception(msg)
|
||||||
|
|
||||||
|
|
||||||
def shutdown_file_systems():
|
|
||||||
""" Shutdown filesystems """
|
|
||||||
|
|
||||||
umount("/var/lib/postgresql")
|
|
||||||
drbd_stop("drbd-pgsql")
|
|
||||||
|
|
||||||
stop_service("www-pages-helm_charts.mount")
|
|
||||||
umount("/opt/platform")
|
|
||||||
drbd_stop("drbd-platform")
|
|
||||||
|
|
||||||
umount("/opt/extension")
|
|
||||||
drbd_stop("drbd-extension")
|
|
||||||
|
|
||||||
if os.path.exists("/opt/patch-vault"):
|
|
||||||
umount("/opt/patch-vault")
|
|
||||||
drbd_stop("drbd-patch-vault")
|
|
||||||
|
|
||||||
umount("/var/lib/rabbitmq")
|
|
||||||
drbd_stop("drbd-rabbit")
|
|
||||||
|
|
||||||
stop_service("etcd.service")
|
|
||||||
stop_service("opt-etcd.mount")
|
|
||||||
drbd_stop("drbd-etcd")
|
|
||||||
|
|
||||||
umount("/var/lib/docker-distribution")
|
|
||||||
drbd_stop("drbd-dockerdistribution")
|
|
||||||
|
|
||||||
|
|
||||||
def persist_config():
|
def persist_config():
|
||||||
"""Copy temporary config files into new DRBD filesystem"""
|
"""Copy temporary config files into new DRBD filesystem"""
|
||||||
|
|
||||||
|
@ -862,24 +312,6 @@ def configure_hostname(hostname):
|
||||||
raise Exception("Failed to configure hostname")
|
raise Exception("Failed to configure hostname")
|
||||||
|
|
||||||
|
|
||||||
def progress(steps, step, action, result, newline=False):
|
|
||||||
"""Display progress."""
|
|
||||||
if steps == 0:
|
|
||||||
hashes = 45
|
|
||||||
percentage = 100
|
|
||||||
else:
|
|
||||||
hashes = (step * 45) / steps
|
|
||||||
percentage = (step * 100) / steps
|
|
||||||
|
|
||||||
sys.stdout.write("\rStep {0:{width}d} of {1:d} [{2:45s}] "
|
|
||||||
"[{3:d}%]".format(min(step, steps), steps,
|
|
||||||
'#' * hashes, percentage,
|
|
||||||
width=len(str(steps))))
|
|
||||||
if step == steps or newline:
|
|
||||||
sys.stdout.write("\n")
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
|
|
||||||
def touch(fname):
|
def touch(fname):
|
||||||
with open(fname, 'a'):
|
with open(fname, 'a'):
|
||||||
os.utime(fname, None)
|
os.utime(fname, None)
|
||||||
|
@ -898,47 +330,6 @@ def is_ssh_parent():
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def is_valid_vlan(vlan):
|
|
||||||
"""Determine whether vlan is valid."""
|
|
||||||
try:
|
|
||||||
if 0 < int(vlan) < 4095:
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
except (ValueError, TypeError):
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_mtu_valid(mtu):
|
|
||||||
"""Determine whether a mtu is valid."""
|
|
||||||
try:
|
|
||||||
if int(mtu) < 576:
|
|
||||||
return False
|
|
||||||
elif int(mtu) > 9216:
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
return True
|
|
||||||
except (ValueError, TypeError):
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_hostname(hostname):
|
|
||||||
"""Determine whether a hostname is valid as per RFC 1123."""
|
|
||||||
|
|
||||||
# Maximum length of 255
|
|
||||||
if not hostname or len(hostname) > 255:
|
|
||||||
return False
|
|
||||||
# Allow a single dot on the right hand side
|
|
||||||
if hostname[-1] == ".":
|
|
||||||
hostname = hostname[:-1]
|
|
||||||
# Create a regex to ensure:
|
|
||||||
# - hostname does not begin or end with a dash
|
|
||||||
# - each segment is 1 to 63 characters long
|
|
||||||
# - valid characters are A-Z (any case) and 0-9
|
|
||||||
valid_re = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) # noqa pylint: disable=anomalous-backslash-in-string
|
|
||||||
return all(valid_re.match(x) for x in hostname.split("."))
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_mac(mac):
|
def is_valid_mac(mac):
|
||||||
"""Verify the format of a MAC addres."""
|
"""Verify the format of a MAC addres."""
|
||||||
if not mac:
|
if not mac:
|
||||||
|
@ -969,32 +360,6 @@ def validate_network_str(network_str, minimum_size,
|
||||||
"Invalid subnet - not a valid IP subnet")
|
"Invalid subnet - not a valid IP subnet")
|
||||||
|
|
||||||
|
|
||||||
def is_valid_filename(filename):
|
|
||||||
return '\0' not in filename
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_by_path(filename):
|
|
||||||
return "/dev/disk/by-path" in filename and "-part" not in filename
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_url(url_str):
|
|
||||||
# Django URL validation patterns
|
|
||||||
r = re.compile(
|
|
||||||
r'^(?:http|ftp)s?://' # http:// or https://
|
|
||||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)' # domain...
|
|
||||||
r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
|
|
||||||
r'localhost|' # localhost...
|
|
||||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
|
|
||||||
r'(?::\d+)?' # optional port
|
|
||||||
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
|
|
||||||
|
|
||||||
url = r.match(url_str)
|
|
||||||
if url:
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_domain(url_str):
|
def is_valid_domain(url_str):
|
||||||
r = re.compile(
|
r = re.compile(
|
||||||
r'^(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)' # domain...
|
r'^(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)' # domain...
|
||||||
|
@ -1010,54 +375,6 @@ def is_valid_domain(url_str):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def is_valid_ipv4(address):
|
|
||||||
"""Verify that address represents a valid IPv4 address."""
|
|
||||||
try:
|
|
||||||
return netaddr.valid_ipv4(address)
|
|
||||||
except Exception:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_ipv6(address):
|
|
||||||
try:
|
|
||||||
return netaddr.valid_ipv6(address)
|
|
||||||
except Exception:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_domain_or_ip(url_str):
|
|
||||||
if url_str:
|
|
||||||
if is_valid_domain(url_str):
|
|
||||||
return True
|
|
||||||
ip_with_port = url_str.split(':')
|
|
||||||
if len(ip_with_port) <= 2:
|
|
||||||
# check ipv4 or ipv4 with port
|
|
||||||
return is_valid_ipv4(ip_with_port[0])
|
|
||||||
else:
|
|
||||||
# check ipv6 with port
|
|
||||||
if '[' in url_str:
|
|
||||||
try:
|
|
||||||
bkt_idx = url_str.index(']')
|
|
||||||
if bkt_idx + 1 == len(url_str):
|
|
||||||
# brackets without port
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
return is_valid_ipv6(url_str[1:bkt_idx])
|
|
||||||
except Exception:
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
# check ipv6 without port
|
|
||||||
return is_valid_ipv6(url_str)
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_bool_str(val):
|
|
||||||
"""Check if the provided string is a valid bool string or not."""
|
|
||||||
boolstrs = ('true', 'false')
|
|
||||||
return str(val).lower() in boolstrs
|
|
||||||
|
|
||||||
|
|
||||||
def validate_address_str(ip_address_str, network):
|
def validate_address_str(ip_address_str, network):
|
||||||
"""Determine whether an address is valid."""
|
"""Determine whether an address is valid."""
|
||||||
try:
|
try:
|
||||||
|
@ -1087,125 +404,3 @@ def ip_version_to_string(ip_version):
|
||||||
return "IPv6"
|
return "IPv6"
|
||||||
else:
|
else:
|
||||||
return "IP"
|
return "IP"
|
||||||
|
|
||||||
|
|
||||||
def validate_nameserver_address_str(ip_address_str, subnet_version=None):
|
|
||||||
"""Determine whether a nameserver address is valid."""
|
|
||||||
try:
|
|
||||||
ip_address = netaddr.IPAddress(ip_address_str)
|
|
||||||
if subnet_version is not None and ip_address.version != subnet_version:
|
|
||||||
msg = ("Invalid IP version - must match OAM subnet version " +
|
|
||||||
ip_version_to_string(subnet_version))
|
|
||||||
raise ValidateFail(msg)
|
|
||||||
return ip_address
|
|
||||||
except netaddr.AddrFormatError:
|
|
||||||
msg = "Invalid address - not a valid %s address" % \
|
|
||||||
ip_version_to_string(subnet_version)
|
|
||||||
raise ValidateFail(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def validate_address(ip_address, network):
|
|
||||||
"""Determine whether an address is valid."""
|
|
||||||
if ip_address.version != network.version:
|
|
||||||
msg = ("Invalid IP version - must match network version " +
|
|
||||||
ip_version_to_string(network.version))
|
|
||||||
raise ValidateFail(msg)
|
|
||||||
elif ip_address == network:
|
|
||||||
raise ValidateFail("Cannot use network address")
|
|
||||||
elif ip_address == network.broadcast:
|
|
||||||
raise ValidateFail("Cannot use broadcast address")
|
|
||||||
elif ip_address not in network:
|
|
||||||
raise ValidateFail("Address must be in subnet %s" % str(network))
|
|
||||||
|
|
||||||
|
|
||||||
def check_network_overlap(new_network, configured_networks):
|
|
||||||
""" Validate that new_network does not overlap any configured_networks.
|
|
||||||
"""
|
|
||||||
if any(new_network.ip in subnet for subnet in
|
|
||||||
configured_networks):
|
|
||||||
raise ValidateFail(
|
|
||||||
"Subnet %s overlaps with another configured subnet" % new_network)
|
|
||||||
|
|
||||||
|
|
||||||
def validate_openstack_password(password, rules_file,
|
|
||||||
section="security_compliance"):
|
|
||||||
try:
|
|
||||||
config = configparser.RawConfigParser()
|
|
||||||
parsed_config = config.read(rules_file)
|
|
||||||
if not parsed_config:
|
|
||||||
msg = ("Cannot parse rules file: %s" % rules_file)
|
|
||||||
raise Exception(msg)
|
|
||||||
if not config.has_section(section):
|
|
||||||
msg = ("Required section '%s' not found in rules file" % section)
|
|
||||||
raise Exception(msg)
|
|
||||||
|
|
||||||
password_regex = get_optional(config, section, 'password_regex')
|
|
||||||
password_regex_description = get_optional(config, section,
|
|
||||||
'password_regex_description')
|
|
||||||
|
|
||||||
if not password_regex:
|
|
||||||
msg = ("Required option 'password_regex' not found in "
|
|
||||||
"rule file: %s" % rules_file)
|
|
||||||
raise Exception(msg)
|
|
||||||
# Even if regex_description is not found, we will proceed
|
|
||||||
# and give a generic failure warning instead
|
|
||||||
if not password_regex_description:
|
|
||||||
password_regex_description = ("Password does not meet "
|
|
||||||
"complexity criteria")
|
|
||||||
|
|
||||||
if not isinstance(password, six.string_types):
|
|
||||||
msg = "Password must be a string type"
|
|
||||||
raise Exception(msg)
|
|
||||||
try:
|
|
||||||
# config parser would read in the string as a literal
|
|
||||||
# representation which would fail regex matching
|
|
||||||
password_regex = password_regex.strip('"')
|
|
||||||
if not re.match(password_regex, password):
|
|
||||||
return False, password_regex_description
|
|
||||||
except re.error:
|
|
||||||
msg = ("Unable to validate password due to invalid "
|
|
||||||
"complexity criteria ('password_regex')")
|
|
||||||
raise Exception(msg)
|
|
||||||
except Exception:
|
|
||||||
raise Exception("Password validation failed")
|
|
||||||
return True, ""
|
|
||||||
|
|
||||||
|
|
||||||
def extract_openstack_password_rules_from_file(
|
|
||||||
rules_file, section="security_compliance"):
|
|
||||||
try:
|
|
||||||
config = configparser.RawConfigParser()
|
|
||||||
parsed_config = config.read(rules_file)
|
|
||||||
if not parsed_config:
|
|
||||||
msg = ("Cannot parse rules file: %s" % rules_file)
|
|
||||||
raise Exception(msg)
|
|
||||||
if not config.has_section(section):
|
|
||||||
msg = ("Required section '%s' not found in rules file" % section)
|
|
||||||
raise Exception(msg)
|
|
||||||
|
|
||||||
rules = config.items(section)
|
|
||||||
if not rules:
|
|
||||||
msg = ("section '%s' contains no configuration options" % section)
|
|
||||||
raise Exception(msg)
|
|
||||||
return dict(rules)
|
|
||||||
except Exception:
|
|
||||||
raise Exception("Failed to extract password rules from file")
|
|
||||||
|
|
||||||
|
|
||||||
def get_optional(conf, section, key):
|
|
||||||
if conf.has_option(section, key):
|
|
||||||
return conf.get(section, key)
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_service(conf, section, key):
|
|
||||||
if key in EXPECTED_SERVICE_NAME_AND_TYPE:
|
|
||||||
if conf.has_option(section, key):
|
|
||||||
value = conf.get(section, key)
|
|
||||||
if value != EXPECTED_SERVICE_NAME_AND_TYPE[key]:
|
|
||||||
raise ValidateFail("Unsupported %s: %s " % (key, value))
|
|
||||||
else:
|
|
||||||
value = EXPECTED_SERVICE_NAME_AND_TYPE[key]
|
|
||||||
return value
|
|
||||||
else:
|
|
||||||
return conf.get(section, key)
|
|
||||||
|
|
|
@ -35,22 +35,14 @@ load-plugins=
|
||||||
# fixme: (notes, xxx, fixme)
|
# fixme: (notes, xxx, fixme)
|
||||||
# W0101: unreachable-code
|
# W0101: unreachable-code
|
||||||
# W0105: pointless-string-statement
|
# W0105: pointless-string-statement
|
||||||
# W0110: deprecated-lambda
|
|
||||||
# W0120: useless-else-on-loop
|
|
||||||
# W0201: attribute-defined-outside-init
|
|
||||||
# W0211: bad-staticmethod-argument
|
|
||||||
# W0212: protected-access
|
|
||||||
# W0311: bad-indentation
|
# W0311: bad-indentation
|
||||||
# W0403: relative-import
|
# W0403: relative-import
|
||||||
# W0612: unused-variable
|
|
||||||
# W0613: unused-argument
|
# W0613: unused-argument
|
||||||
# W0621: redefined-outer-name
|
# W0621: redefined-outer-name
|
||||||
# W0622: redefined-builtin
|
# W0622: redefined-builtin
|
||||||
# W0631: undefined-loop-variable
|
|
||||||
# W0703: broad-except
|
# W0703: broad-except
|
||||||
# W1501: bad-open-mode
|
# W1501: bad-open-mode
|
||||||
disable=C, R, fixme, W0101, W0105, W0110, W0120, W0201, W0211, W0212, W0311, W0403,
|
disable=C, R, fixme, W0101, W0105, W0311, W0403, W0613, W0621, W0622, W0703, W1501
|
||||||
W0612, W0613, W0621, W0622, W0631, W0703, W1501
|
|
||||||
|
|
||||||
|
|
||||||
[REPORTS]
|
[REPORTS]
|
||||||
|
|
|
@ -1,42 +0,0 @@
|
||||||
#! /bin/bash
|
|
||||||
########################################################################
|
|
||||||
#
|
|
||||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
########################################################################
|
|
||||||
|
|
||||||
PLATFORMOPENRC="/etc/platform/openrc"
|
|
||||||
if [ -e ${PLATFORMOPENRC} ] ; then
|
|
||||||
source ${PLATFORMOPENRC} &>/dev/null
|
|
||||||
else
|
|
||||||
echo "Admin credentials not found"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Delete all the servers
|
|
||||||
echo "Deleting all servers [`openstack server list --all`]"
|
|
||||||
found=false
|
|
||||||
for i in $(openstack server list --all -c ID -f value); do
|
|
||||||
`openstack server delete $i &> /dev/null`
|
|
||||||
echo $i deleted
|
|
||||||
found=true
|
|
||||||
done
|
|
||||||
if $found; then
|
|
||||||
sleep 30
|
|
||||||
fi
|
|
||||||
echo "Deleted all servers [`openstack server list --all`]"
|
|
||||||
# Delete all the volumes
|
|
||||||
echo "Deleting all volumes [`openstack volume list --all`]"
|
|
||||||
found=false
|
|
||||||
for i in $(openstack volume list --all -c ID -f value); do
|
|
||||||
`openstack volume delete $i &> /dev/null`
|
|
||||||
echo $i deleted
|
|
||||||
found=true
|
|
||||||
done
|
|
||||||
if $found; then
|
|
||||||
sleep 30
|
|
||||||
fi
|
|
||||||
echo "Deleted all volumes [`openstack volume list --all`]"
|
|
||||||
|
|
|
@ -1,321 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
#
|
|
||||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
import shutil
|
|
||||||
import tempfile
|
|
||||||
import subprocess
|
|
||||||
from six.moves import configparser
|
|
||||||
|
|
||||||
import tsconfig.tsconfig as tsconfig
|
|
||||||
from controllerconfig.common import log
|
|
||||||
import controllerconfig.utils as utils
|
|
||||||
import controllerconfig.sysinv_api as sysinv
|
|
||||||
import controllerconfig.backup_restore as backup_restore
|
|
||||||
import controllerconfig.clone as clone
|
|
||||||
from controllerconfig.common.exceptions import CloneFail
|
|
||||||
from sysinv.common import constants as si_const
|
|
||||||
|
|
||||||
LOG = log.get_logger("cloning")
|
|
||||||
DEVNULL = open(os.devnull, 'w')
|
|
||||||
INI_FILE = os.path.join("/", clone.CLONE_ARCHIVE_DIR, clone.CLONE_ISO_INI)
|
|
||||||
SECTION = "clone_iso"
|
|
||||||
parser = configparser.SafeConfigParser()
|
|
||||||
clone_name = ""
|
|
||||||
|
|
||||||
|
|
||||||
def console_log(str, err=False):
|
|
||||||
""" Log onto console also """
|
|
||||||
if err:
|
|
||||||
str = "Failed to install clone-image. " + str
|
|
||||||
LOG.error(str)
|
|
||||||
else:
|
|
||||||
LOG.info(str)
|
|
||||||
print("\n" + str)
|
|
||||||
|
|
||||||
|
|
||||||
def persist(key, value):
|
|
||||||
""" Write into ini file """
|
|
||||||
parser.set(SECTION, key, value)
|
|
||||||
with open(INI_FILE, 'w') as f:
|
|
||||||
parser.write(f)
|
|
||||||
|
|
||||||
|
|
||||||
def set_result(value):
|
|
||||||
""" Set the result of installation of clone image """
|
|
||||||
persist(clone.RESULT, value)
|
|
||||||
persist(clone.INSTALLED, time.strftime("%Y-%m-%d %H:%M:%S %Z"))
|
|
||||||
|
|
||||||
|
|
||||||
def validate_hardware_compatibility():
|
|
||||||
""" validate if cloned-image can be installed on this h/w """
|
|
||||||
valid = True
|
|
||||||
disk_paths = ""
|
|
||||||
if parser.has_option(SECTION, "disks"):
|
|
||||||
disk_paths = parser.get(SECTION, "disks")
|
|
||||||
if not disk_paths:
|
|
||||||
console_log("Missing value [disks] in ini file")
|
|
||||||
valid = False
|
|
||||||
for d in disk_paths.split():
|
|
||||||
disk_path, size = d.split('#')
|
|
||||||
if os.path.exists('/dev/disk/by-path/' + disk_path):
|
|
||||||
LOG.info("Disk [{}] exists".format(disk_path))
|
|
||||||
disk_size = clone.get_disk_size('/dev/disk/by-path/' +
|
|
||||||
disk_path)
|
|
||||||
if int(disk_size) >= int(size):
|
|
||||||
LOG.info("Disk size is good: {} >= {}"
|
|
||||||
.format(utils.print_bytes(int(disk_size)),
|
|
||||||
utils.print_bytes(int(size))))
|
|
||||||
else:
|
|
||||||
console_log("Not enough disk size[{}], "
|
|
||||||
"found:{} looking_for:{}".format(
|
|
||||||
disk_path, utils.print_bytes(int(disk_size)),
|
|
||||||
utils.print_bytes(int(size))), err=True)
|
|
||||||
valid = False
|
|
||||||
else:
|
|
||||||
console_log("Disk [{}] does not exist!"
|
|
||||||
.format(disk_path), err=True)
|
|
||||||
valid = False
|
|
||||||
|
|
||||||
interfaces = ""
|
|
||||||
if parser.has_option(SECTION, "interfaces"):
|
|
||||||
interfaces = parser.get(SECTION, "interfaces")
|
|
||||||
if not interfaces:
|
|
||||||
console_log("Missing value [interfaces] in ini file")
|
|
||||||
valid = False
|
|
||||||
for f in interfaces.split():
|
|
||||||
if os.path.exists('/sys/class/net/' + f):
|
|
||||||
LOG.info("Interface [{}] exists".format(f))
|
|
||||||
else:
|
|
||||||
console_log("Interface [{}] does not exist!"
|
|
||||||
.format(f), err=True)
|
|
||||||
valid = False
|
|
||||||
|
|
||||||
maxcpuid = ""
|
|
||||||
if parser.has_option(SECTION, "cpus"):
|
|
||||||
maxcpuid = parser.get(SECTION, "cpus")
|
|
||||||
if not maxcpuid:
|
|
||||||
console_log("Missing value [cpus] in ini file")
|
|
||||||
valid = False
|
|
||||||
else:
|
|
||||||
my_maxcpuid = clone.get_online_cpus()
|
|
||||||
if int(maxcpuid) <= int(my_maxcpuid):
|
|
||||||
LOG.info("Got enough cpus {},{}".format(
|
|
||||||
maxcpuid, my_maxcpuid))
|
|
||||||
else:
|
|
||||||
console_log("Not enough CPUs, found:{} looking_for:{}"
|
|
||||||
.format(my_maxcpuid, maxcpuid), err=True)
|
|
||||||
valid = False
|
|
||||||
|
|
||||||
mem_total = ""
|
|
||||||
if parser.has_option(SECTION, "mem"):
|
|
||||||
mem_total = parser.get(SECTION, "mem")
|
|
||||||
if not mem_total:
|
|
||||||
console_log("Missing value [mem] in ini file")
|
|
||||||
valid = False
|
|
||||||
else:
|
|
||||||
my_mem_total = clone.get_total_mem()
|
|
||||||
# relaxed RAM check: within 1 GiB
|
|
||||||
if (int(mem_total) - (1024 * 1024)) <= int(my_mem_total):
|
|
||||||
LOG.info("Got enough memory {},{}".format(
|
|
||||||
mem_total, my_mem_total))
|
|
||||||
else:
|
|
||||||
console_log("Not enough memory; found:{} kB, "
|
|
||||||
"looking for a minimum of {} kB"
|
|
||||||
.format(my_mem_total, mem_total), err=True)
|
|
||||||
valid = False
|
|
||||||
|
|
||||||
if not valid:
|
|
||||||
console_log("Validation failure!")
|
|
||||||
set_result(clone.FAIL)
|
|
||||||
time.sleep(20)
|
|
||||||
exit(1)
|
|
||||||
|
|
||||||
console_log("Successful validation")
|
|
||||||
|
|
||||||
|
|
||||||
def update_sysuuid_in_archive(tmpdir):
|
|
||||||
"""Update system uuid in system archive file."""
|
|
||||||
sysuuid = str(uuid.uuid4())
|
|
||||||
clone.find_and_replace(
|
|
||||||
[os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
|
|
||||||
"CLONEISO_SYSTEM_UUID", sysuuid)
|
|
||||||
LOG.info("System uuid updated [%s]" % sysuuid)
|
|
||||||
|
|
||||||
|
|
||||||
def update_db(archive_dir, backup_name):
|
|
||||||
""" Update DB before restore """
|
|
||||||
path_to_archive = os.path.join(archive_dir, backup_name)
|
|
||||||
LOG.info("Updating system archive [%s] DB." % path_to_archive)
|
|
||||||
tmpdir = tempfile.mkdtemp(dir=archive_dir)
|
|
||||||
try:
|
|
||||||
subprocess.check_call(
|
|
||||||
['gunzip', path_to_archive + '.tgz'],
|
|
||||||
stdout=DEVNULL, stderr=DEVNULL)
|
|
||||||
# Extract only postgres dir to update system uuid
|
|
||||||
subprocess.check_call(
|
|
||||||
['tar', '-x',
|
|
||||||
'--directory=' + tmpdir,
|
|
||||||
'-f', path_to_archive + '.tar',
|
|
||||||
'postgres'],
|
|
||||||
stdout=DEVNULL, stderr=DEVNULL)
|
|
||||||
update_sysuuid_in_archive(tmpdir)
|
|
||||||
subprocess.check_call(
|
|
||||||
['tar', '--update',
|
|
||||||
'--directory=' + tmpdir,
|
|
||||||
'-f', path_to_archive + '.tar',
|
|
||||||
'postgres'],
|
|
||||||
stdout=DEVNULL, stderr=DEVNULL)
|
|
||||||
subprocess.check_call(['gzip', path_to_archive + '.tar'])
|
|
||||||
shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
LOG.error("Update of system archive {} failed {}".format(
|
|
||||||
path_to_archive, str(e)))
|
|
||||||
raise CloneFail("Failed to update system archive")
|
|
||||||
|
|
||||||
finally:
|
|
||||||
shutil.rmtree(tmpdir, ignore_errors=True)
|
|
||||||
|
|
||||||
|
|
||||||
def config_worker():
|
|
||||||
"""
|
|
||||||
Enable worker functionality for AIO system.
|
|
||||||
:return: True if worker-config-complete is executed
|
|
||||||
"""
|
|
||||||
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
|
|
||||||
console_log("Applying worker manifests for {}. "
|
|
||||||
"Node will reboot on completion."
|
|
||||||
.format(utils.get_controller_hostname()))
|
|
||||||
sysinv.do_worker_config_complete(utils.get_controller_hostname())
|
|
||||||
time.sleep(30)
|
|
||||||
# worker-config-complete has no logs to console. So, wait
|
|
||||||
# for some time before showing the login prompt.
|
|
||||||
for i in range(1, 10):
|
|
||||||
console_log("worker-config in progress..")
|
|
||||||
time.sleep(30)
|
|
||||||
console_log("Timed out on do_worker_config_complete")
|
|
||||||
raise CloneFail("Timed out on do_worker_config_complete")
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
# worker_config_complete is not needed.
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def finalize_install():
|
|
||||||
""" Complete the installation """
|
|
||||||
subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases'])
|
|
||||||
console_log("Updating system parameters...")
|
|
||||||
i = 1
|
|
||||||
system_update = False
|
|
||||||
# Retries if sysinv is not yet ready
|
|
||||||
while i < 10:
|
|
||||||
time.sleep(20)
|
|
||||||
LOG.info("Attempt %d to update system parameters..." % i)
|
|
||||||
try:
|
|
||||||
if sysinv.update_clone_system('Cloned_from_' + clone_name,
|
|
||||||
utils.get_controller_hostname()):
|
|
||||||
system_update = True
|
|
||||||
break
|
|
||||||
except Exception:
|
|
||||||
# Sysinv might not be ready yet
|
|
||||||
pass
|
|
||||||
i += 1
|
|
||||||
if not system_update:
|
|
||||||
LOG.error("System update failed")
|
|
||||||
raise CloneFail("System update failed")
|
|
||||||
|
|
||||||
try:
|
|
||||||
output = subprocess.check_output(["finish_install_clone.sh"],
|
|
||||||
stderr=subprocess.STDOUT)
|
|
||||||
LOG.info("finish_install_clone out: {}".format(output))
|
|
||||||
except Exception:
|
|
||||||
console_log("Failed to cleanup stale OpenStack resources. "
|
|
||||||
"Manually delete the Volumes and Instances.")
|
|
||||||
|
|
||||||
|
|
||||||
def cleanup():
|
|
||||||
""" Cleanup after installation """
|
|
||||||
LOG.info("Cleaning up...")
|
|
||||||
subprocess.call(['systemctl', 'disable', 'install-clone'], stderr=DEVNULL)
|
|
||||||
OLD_FILE = os.path.join(tsconfig.PLATFORM_CONF_PATH, clone.CLONE_ISO_INI)
|
|
||||||
if os.path.exists(OLD_FILE):
|
|
||||||
os.remove(OLD_FILE)
|
|
||||||
if os.path.exists(INI_FILE):
|
|
||||||
os.chmod(INI_FILE, 0o400)
|
|
||||||
shutil.move(INI_FILE, tsconfig.PLATFORM_CONF_PATH)
|
|
||||||
shutil.rmtree(os.path.join("/", clone.CLONE_ARCHIVE_DIR),
|
|
||||||
ignore_errors=True)
|
|
||||||
|
|
||||||
|
|
||||||
log.configure()
|
|
||||||
if os.path.exists(INI_FILE):
|
|
||||||
try:
|
|
||||||
parser.read(INI_FILE)
|
|
||||||
if parser.has_section(SECTION):
|
|
||||||
clone_name = parser.get(SECTION, clone.NAME)
|
|
||||||
LOG.info("System archive [%s] to be installed." % clone_name)
|
|
||||||
|
|
||||||
first_boot = False
|
|
||||||
last_result = clone.IN_PROGRESS
|
|
||||||
if not parser.has_option(SECTION, clone.RESULT):
|
|
||||||
# first boot after cloning
|
|
||||||
first_boot = True
|
|
||||||
else:
|
|
||||||
last_result = parser.get(SECTION, clone.RESULT)
|
|
||||||
LOG.info("Last attempt to install clone was [{}]"
|
|
||||||
.format(last_result))
|
|
||||||
|
|
||||||
if last_result == clone.IN_PROGRESS:
|
|
||||||
if first_boot:
|
|
||||||
update_db(os.path.join("/", clone.CLONE_ARCHIVE_DIR),
|
|
||||||
clone_name + '_system')
|
|
||||||
else:
|
|
||||||
# Booting up after patch application, do validation
|
|
||||||
validate_hardware_compatibility()
|
|
||||||
|
|
||||||
console_log("+++++ Starting to install clone-image [{}] +++++"
|
|
||||||
.format(clone_name))
|
|
||||||
set_result(clone.IN_PROGRESS)
|
|
||||||
clone_arch_path = os.path.join("/", clone.CLONE_ARCHIVE_DIR,
|
|
||||||
clone_name)
|
|
||||||
if (backup_restore.RESTORE_RERUN_REQUIRED ==
|
|
||||||
backup_restore.restore_system(
|
|
||||||
clone_arch_path + "_system.tgz",
|
|
||||||
clone=True)):
|
|
||||||
# If there are no patches to be applied, run validation
|
|
||||||
# code and resume restore. If patches were applied, node
|
|
||||||
# will be rebooted and validate will after reboot.
|
|
||||||
validate_hardware_compatibility()
|
|
||||||
LOG.info("validate passed, resuming restore...")
|
|
||||||
backup_restore.restore_system(
|
|
||||||
clone_arch_path + "_system.tgz", clone=True)
|
|
||||||
console_log("System archive installed from [%s]" % clone_name)
|
|
||||||
backup_restore.restore_images(clone_arch_path + "_images.tgz",
|
|
||||||
clone=True)
|
|
||||||
console_log("Images archive installed from [%s]" % clone_name)
|
|
||||||
finalize_install()
|
|
||||||
set_result(clone.OK)
|
|
||||||
if not config_worker():
|
|
||||||
# do cleanup if worker_config_complete is not required
|
|
||||||
cleanup()
|
|
||||||
elif last_result == clone.OK:
|
|
||||||
# Installation completed successfully before last reboot
|
|
||||||
cleanup()
|
|
||||||
else:
|
|
||||||
LOG.error("Bad file: {}".format(INI_FILE))
|
|
||||||
set_result(clone.FAIL)
|
|
||||||
exit(1)
|
|
||||||
except Exception as e:
|
|
||||||
console_log("Clone [%s] installation failed" % clone_name)
|
|
||||||
LOG.exception("install failed")
|
|
||||||
set_result(clone.FAIL)
|
|
||||||
exit(1)
|
|
||||||
else:
|
|
||||||
console_log("nothing to do, Not installing clone?")
|
|
|
@ -1,30 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright (c) 2014 Wind River Systems, Inc.
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
|
||||||
#
|
|
||||||
|
|
||||||
import keyring
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
def get_stealth_password():
|
|
||||||
"""Get the stealth password vault for manifest to run"""
|
|
||||||
orig_root = os.environ.get('XDG_DATA_HOME', None)
|
|
||||||
os.environ["XDG_DATA_HOME"] = "/tmp"
|
|
||||||
|
|
||||||
stealth_pw = keyring.get_password("CGCS", "admin")
|
|
||||||
|
|
||||||
if orig_root is not None:
|
|
||||||
os.environ("XDG_DATA_HOME",orig_root)
|
|
||||||
else:
|
|
||||||
del os.environ["XDG_DATA_HOME"]
|
|
||||||
return stealth_pw
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
sys.stdout.write(get_stealth_password())
|
|
||||||
sys.stdout.flush()
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2015-2017 Wind River Systems, Inc.
|
# Copyright (c) 2015-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
|
@ -18,9 +18,6 @@ setup(
|
||||||
include_package_data=False,
|
include_package_data=False,
|
||||||
entry_points={
|
entry_points={
|
||||||
'console_scripts': [
|
'console_scripts': [
|
||||||
'config_controller = controllerconfig.systemconfig:main',
|
|
||||||
'config_region = controllerconfig.regionconfig:region_main',
|
|
||||||
'config_subcloud = controllerconfig.regionconfig:subcloud_main',
|
|
||||||
'config_management = controllerconfig.config_management:main',
|
'config_management = controllerconfig.config_management:main',
|
||||||
'upgrade_controller = controllerconfig.upgrades.controller:main',
|
'upgrade_controller = controllerconfig.upgrades.controller:main',
|
||||||
'upgrade_controller_simplex = '
|
'upgrade_controller_simplex = '
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
# and then run "tox" from this directory.
|
# and then run "tox" from this directory.
|
||||||
|
|
||||||
[tox]
|
[tox]
|
||||||
envlist = flake8, py27, pylint
|
envlist = flake8, pylint
|
||||||
# Tox does not work if the path to the workdir is too long, so move it to /tmp
|
# Tox does not work if the path to the workdir is too long, so move it to /tmp
|
||||||
toxworkdir = /tmp/{env:USER}_cctox
|
toxworkdir = /tmp/{env:USER}_cctox
|
||||||
stxdir = {toxinidir}/../../..
|
stxdir = {toxinidir}/../../..
|
||||||
|
@ -41,21 +41,13 @@ commands = flake8 {posargs}
|
||||||
# H101: Use TODO(NAME)
|
# H101: Use TODO(NAME)
|
||||||
# H102: Apache 2.0 license header not found
|
# H102: Apache 2.0 license header not found
|
||||||
# H104: File contains nothing but comments
|
# H104: File contains nothing but comments
|
||||||
# H238: old style class declaration, use new style (inherit from `object`)
|
|
||||||
# H306: imports not in alphabetical order
|
# H306: imports not in alphabetical order
|
||||||
# H401: docstring should not start with a space
|
# H401: docstring should not start with a space
|
||||||
# H403: multi line docstrings should end on a new line
|
|
||||||
# H404: multi line docstring should start without a leading new line
|
# H404: multi line docstring should start without a leading new line
|
||||||
# H405: multi line docstring summary not separated with an empty line
|
# H405: multi line docstring summary not separated with an empty line
|
||||||
ignore = H101,H102,H104,H238,H306,H401,H403,H404,H405
|
ignore = H101,H102,H104,H306,H401,H404,H405
|
||||||
exclude = build
|
exclude = build
|
||||||
|
|
||||||
[testenv:py27]
|
|
||||||
basepython = python2.7
|
|
||||||
commands =
|
|
||||||
find . -type f -name "*.pyc" -delete
|
|
||||||
py.test {posargs}
|
|
||||||
|
|
||||||
[testenv:cover]
|
[testenv:cover]
|
||||||
basepython = python2.7
|
basepython = python2.7
|
||||||
deps = {[testenv]deps}
|
deps = {[testenv]deps}
|
||||||
|
|
|
@ -14,9 +14,9 @@ import psycopg2
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from psycopg2.extras import RealDictCursor
|
from psycopg2.extras import RealDictCursor
|
||||||
from controllerconfig.common import log
|
from oslo_log import log
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
|
@ -13,9 +13,9 @@ import sys
|
||||||
|
|
||||||
from sysinv.common import constants
|
from sysinv.common import constants
|
||||||
from psycopg2.extras import RealDictCursor
|
from psycopg2.extras import RealDictCursor
|
||||||
from controllerconfig.common import log
|
from oslo_log import log
|
||||||
|
|
||||||
LOG = log.get_logger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
# Sections that need to be removed from retired Ceph cache tiering feature
|
# Sections that need to be removed from retired Ceph cache tiering feature
|
||||||
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering'
|
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering'
|
||||||
|
|
|
@ -55,10 +55,7 @@ Configuration for the Controller node.
|
||||||
#install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
|
#install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
|
||||||
|
|
||||||
install -d -m 755 %{buildroot}%{local_bindir}
|
install -d -m 755 %{buildroot}%{local_bindir}
|
||||||
install -p -D -m 700 scripts/keyringstaging %{buildroot}%{local_bindir}/keyringstaging
|
|
||||||
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
|
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
|
||||||
install -p -D -m 700 scripts/install_clone.py %{buildroot}%{local_bindir}/install_clone
|
|
||||||
install -p -D -m 700 scripts/finish_install_clone.sh %{buildroot}%{local_bindir}/finish_install_clone.sh
|
|
||||||
|
|
||||||
install -d -m 755 %{buildroot}%{local_goenabledd}
|
install -d -m 755 %{buildroot}%{local_goenabledd}
|
||||||
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
|
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
|
||||||
|
|
|
@ -16,7 +16,7 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
#
|
#
|
||||||
# Copyright (c) 2013-2019 Wind River Systems, Inc.
|
# Copyright (c) 2013-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
@ -41,7 +41,6 @@ import wsme
|
||||||
import wsmeext.pecan as wsme_pecan
|
import wsmeext.pecan as wsme_pecan
|
||||||
|
|
||||||
from wsme import types as wtypes
|
from wsme import types as wtypes
|
||||||
from controllerconfig import HOST_XML_ATTRIBUTES
|
|
||||||
from fm_api import constants as fm_constants
|
from fm_api import constants as fm_constants
|
||||||
from fm_api import fm_api
|
from fm_api import fm_api
|
||||||
from pecan import expose
|
from pecan import expose
|
||||||
|
@ -98,6 +97,12 @@ from sysinv.common import health
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
KEYRING_BM_SERVICE = "BM"
|
KEYRING_BM_SERVICE = "BM"
|
||||||
ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER = "-1003"
|
ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER = "-1003"
|
||||||
|
HOST_XML_ATTRIBUTES = ['hostname', 'personality', 'subfunctions',
|
||||||
|
'mgmt_mac', 'mgmt_ip',
|
||||||
|
'bm_ip', 'bm_type', 'bm_username',
|
||||||
|
'bm_password', 'boot_device', 'rootfs_device',
|
||||||
|
'install_output', 'console', 'vsc_controllers',
|
||||||
|
'power_on', 'location']
|
||||||
|
|
||||||
|
|
||||||
def _get_controller_address(hostname):
|
def _get_controller_address(hostname):
|
||||||
|
|
|
@ -1,12 +1,11 @@
|
||||||
#
|
#
|
||||||
# Copyright (c) 2018-2019 Wind River Systems, Inc.
|
# Copyright (c) 2018-2020 Wind River Systems, Inc.
|
||||||
#
|
#
|
||||||
# SPDX-License-Identifier: Apache-2.0
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
#
|
#
|
||||||
from eventlet.green import subprocess
|
from eventlet.green import subprocess
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from controllerconfig import backup_restore
|
|
||||||
|
|
||||||
from fm_api import fm_api
|
from fm_api import fm_api
|
||||||
|
|
||||||
|
@ -208,12 +207,14 @@ class Health(object):
|
||||||
|
|
||||||
def _check_simplex_available_space(self):
|
def _check_simplex_available_space(self):
|
||||||
"""Ensures there is free space for the backup"""
|
"""Ensures there is free space for the backup"""
|
||||||
try:
|
|
||||||
backup_restore.check_size("/opt/backups", True)
|
|
||||||
except backup_restore.BackupFail:
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
# TODO: Switch this over to use Ansible
|
||||||
|
# try:
|
||||||
|
# backup_restore.check_size("/opt/backups", True)
|
||||||
|
# except backup_restore.BackupFail:
|
||||||
|
# return False
|
||||||
|
# return True
|
||||||
|
LOG.info("Skip the check of the enough free space.")
|
||||||
|
|
||||||
def _check_kube_nodes_ready(self):
|
def _check_kube_nodes_ready(self):
|
||||||
"""Checks that each kubernetes node is ready"""
|
"""Checks that each kubernetes node is ready"""
|
||||||
|
|
Loading…
Reference in New Issue