FIX: "dict" issue for Python 2/3 compatible code
Replace dict.iteritems() with dict.items(). Change dict.keys() to list(dict.keys()).

Story: 2004585
Task: 28449
Change-Id: I674c499fd575800fa307a835d8f7f272f2b2cd03
Signed-off-by: Sun Austin <austin.sun@intel.com>
This commit is contained in:
parent
bb65ec36e6
commit
5e9dc48384
|
@ -707,7 +707,7 @@ class PatchOrchThread(threading.Thread):
|
|||
# removed to match the applied patches in RegionOne. Check the
|
||||
# repostate, which indicates whether it is applied or removed in
|
||||
# the repo.
|
||||
subcloud_patch_ids = subcloud_patches.keys()
|
||||
subcloud_patch_ids = list(subcloud_patches.keys())
|
||||
for patch_id in subcloud_patch_ids:
|
||||
if subcloud_patches[patch_id]['repostate'] == \
|
||||
patching_v1.PATCH_STATE_APPLIED:
|
||||
|
@ -1386,7 +1386,7 @@ class PatchOrchThread(threading.Thread):
|
|||
# which should be deleted. We check the patchstate here because
|
||||
# patches cannot be deleted or committed if they are in a partial
|
||||
# state (e.g. Partial-Apply or Partial-Remove).
|
||||
subcloud_patch_ids = subcloud_patches.keys()
|
||||
subcloud_patch_ids = list(subcloud_patches.keys())
|
||||
for patch_id in subcloud_patch_ids:
|
||||
if subcloud_patches[patch_id]['patchstate'] == \
|
||||
patching_v1.PATCH_STATE_AVAILABLE:
|
||||
|
|
|
@ -74,8 +74,8 @@ class Acceptor(Router):
|
|||
api_controller = ComputeAPIController(app, conf)
|
||||
orch_controller = OrchAPIController(app, conf)
|
||||
|
||||
for key, value in proxy_consts.COMPUTE_PATH_MAP.iteritems():
|
||||
for k, v in value.iteritems():
|
||||
for key, value in proxy_consts.COMPUTE_PATH_MAP.items():
|
||||
for k, v in value.items():
|
||||
self._add_resource(mapper, api_controller, v, k,
|
||||
CONF.type, key)
|
||||
|
||||
|
@ -87,14 +87,14 @@ class Acceptor(Router):
|
|||
def add_platform_routes(self, app, conf, mapper):
|
||||
api_controller = SysinvAPIController(app, conf)
|
||||
|
||||
for key, value in proxy_consts.SYSINV_PATH_MAP.iteritems():
|
||||
for key, value in proxy_consts.SYSINV_PATH_MAP.items():
|
||||
self._add_resource(mapper, api_controller, value, key, CONF.type)
|
||||
|
||||
def add_volume_routes(self, app, conf, mapper):
|
||||
api_controller = CinderAPIController(app, conf)
|
||||
|
||||
for key, value in proxy_consts.CINDER_PATH_MAP.iteritems():
|
||||
for k, v in value.iteritems():
|
||||
for key, value in proxy_consts.CINDER_PATH_MAP.items():
|
||||
for k, v in value.items():
|
||||
self._add_resource(mapper, api_controller, v, k,
|
||||
CONF.type, key)
|
||||
|
||||
|
@ -102,7 +102,7 @@ class Acceptor(Router):
|
|||
api_controller = NeutronAPIController(app, conf)
|
||||
orch_controller = OrchAPIController(app, conf)
|
||||
|
||||
for key, value in proxy_consts.NEUTRON_PATH_MAP.iteritems():
|
||||
for key, value in proxy_consts.NEUTRON_PATH_MAP.items():
|
||||
self._add_resource(mapper, api_controller, value, key, CONF.type)
|
||||
|
||||
self._add_resource(mapper, orch_controller,
|
||||
|
@ -113,11 +113,11 @@ class Acceptor(Router):
|
|||
def add_patch_routes(self, app, conf, mapper):
|
||||
api_controller = PatchAPIController(app, conf)
|
||||
|
||||
for key, value in proxy_consts.PATCH_PATH_MAP.iteritems():
|
||||
for key, value in proxy_consts.PATCH_PATH_MAP.items():
|
||||
self._add_resource(mapper, api_controller, value, key, CONF.type)
|
||||
|
||||
def add_identity_routes(self, app, conf, mapper):
|
||||
api_controller = IdentityAPIController(app, conf)
|
||||
|
||||
for key, value in proxy_consts.IDENTITY_PATH_MAP.iteritems():
|
||||
for key, value in proxy_consts.IDENTITY_PATH_MAP.items():
|
||||
self._add_resource(mapper, api_controller, value, key, CONF.type)
|
||||
|
|
|
@ -157,7 +157,7 @@ class APIController(Middleware):
|
|||
environ['REQUEST_METHOD'], length)
|
||||
LOG.info("Request URL: (%s)\n", self.get_request_header(environ))
|
||||
LOG.info("Request header: \n")
|
||||
for k, v in req.headers.iteritems():
|
||||
for k, v in req.headers.items():
|
||||
LOG.info(" %s: %s\n", k, v)
|
||||
self.print_environ(environ)
|
||||
self.print_request_body(req.body)
|
||||
|
@ -485,7 +485,7 @@ class IdentityAPIController(APIController):
|
|||
else:
|
||||
if operation_type == consts.OPERATION_TYPE_POST:
|
||||
# Retrieve the ID from the response
|
||||
resource = json.loads(response.body).items()[0][1]
|
||||
resource = list(json.loads(response.body).items())[0][1]
|
||||
resource_id = resource['id']
|
||||
else:
|
||||
resource_id = self.get_resource_id_from_link(request_header)
|
||||
|
|
|
@ -176,7 +176,7 @@ class PatchAPIController(Middleware):
|
|||
|
||||
def patch_upload_dir_req(self, request, response):
|
||||
files = []
|
||||
for key, path in request.GET.iteritems():
|
||||
for key, path in request.GET.items():
|
||||
LOG.info("upload-dir: Retrieving patches from %s" % path)
|
||||
for f in glob.glob(path + '/*.patch'):
|
||||
if os.path.isfile(f):
|
||||
|
|
|
@ -71,7 +71,7 @@ def get_routing_match_value(environ, key):
|
|||
return match[key]
|
||||
else:
|
||||
LOG.info("(%s) is not available in routing match arguments.", key)
|
||||
for k, v in match.iteritems():
|
||||
for k, v in match.items():
|
||||
LOG.info("Match key:(%s), value:(%s)", k, v)
|
||||
return None
|
||||
|
||||
|
|
|
@ -161,7 +161,7 @@ class EndpointCache(object):
|
|||
|
||||
return: List of regions
|
||||
"""
|
||||
return self.endpoint_map.keys()
|
||||
return list(self.endpoint_map.keys())
|
||||
|
||||
def get_session_from_token(self, token, project_id):
|
||||
"""Get session based on token to communicate with openstack services.
|
||||
|
|
|
@ -36,7 +36,7 @@ API_VERSION = '1'
|
|||
|
||||
def make_sysinv_patch(update_dict):
|
||||
patch = []
|
||||
for k, v in update_dict.iteritems():
|
||||
for k, v in update_dict.items():
|
||||
key = k
|
||||
if not k.startswith('/'):
|
||||
key = '/' + key
|
||||
|
@ -265,7 +265,7 @@ class SysinvClient(base.DriverBase):
|
|||
# {"ip_address": "10.10.10.12", "community": "cgcs"}
|
||||
itrapdest = None
|
||||
trapdest_create_dict = {}
|
||||
for k, v in trapdest_dict.iteritems():
|
||||
for k, v in trapdest_dict.items():
|
||||
if k in SNMP_TRAPDEST_CREATION_ATTRIBUTES:
|
||||
trapdest_create_dict[str(k)] = v
|
||||
|
||||
|
@ -329,7 +329,7 @@ class SysinvClient(base.DriverBase):
|
|||
# Example community_dict: {"community": "cgcs"}
|
||||
icommunity = None
|
||||
community_create_dict = {}
|
||||
for k, v in community_dict.iteritems():
|
||||
for k, v in community_dict.items():
|
||||
if k in SNMP_COMMUNITY_CREATION_ATTRIBUTES:
|
||||
community_create_dict[str(k)] = v
|
||||
|
||||
|
@ -399,7 +399,7 @@ class SysinvClient(base.DriverBase):
|
|||
def create_remote_logging_patch_from_dict(self, values):
|
||||
patch = {}
|
||||
action_found = False
|
||||
for k, v in values.iteritems():
|
||||
for k, v in values.items():
|
||||
if k in self.REMOTELOGGING_PATCH_ATTRS:
|
||||
if k == 'action':
|
||||
action_found = True
|
||||
|
|
|
@ -251,7 +251,7 @@ class QuotaManager(manager.Manager):
|
|||
project_id)
|
||||
except exceptions.ProjectQuotaNotFound:
|
||||
limits_from_db = {}
|
||||
for current_resource in CONF.dc_orch_global_limit.iteritems():
|
||||
for current_resource in CONF.dc_orch_global_limit.items():
|
||||
resource = re.sub('quota_', '', current_resource[0])
|
||||
# If resource limit in DB, then use it or else use limit
|
||||
# from conf file
|
||||
|
@ -481,7 +481,7 @@ class QuotaManager(manager.Manager):
|
|||
# endpoint types, so we need to figure out which ones we want.
|
||||
desired_fields = consts.ENDPOINT_QUOTA_MAPPING[endpoint_type]
|
||||
usage_dict = {}
|
||||
for k, v in total_project_usages.iteritems():
|
||||
for k, v in total_project_usages.items():
|
||||
if k in desired_fields:
|
||||
usage_dict[k] = v
|
||||
return usage_dict
|
||||
|
|
|
@ -292,7 +292,7 @@ class ComputeSyncThread(SyncThread):
|
|||
consts.ACTION_EXTRASPECS_POST: self.set_extra_specs,
|
||||
consts.ACTION_EXTRASPECS_DELETE: self.unset_extra_specs,
|
||||
}
|
||||
action = action_dict.keys()[0]
|
||||
action = list(action_dict.keys())[0]
|
||||
if action not in switcher.keys():
|
||||
LOG.error("Unsupported flavor action {}".format(action),
|
||||
extra=self.log_extra)
|
||||
|
@ -343,10 +343,10 @@ class ComputeSyncThread(SyncThread):
|
|||
metadata[metadatum] = None
|
||||
|
||||
try:
|
||||
flavor.unset_keys(metadata.keys())
|
||||
flavor.unset_keys(list(metadata.keys()))
|
||||
except novaclient_exceptions.NotFound:
|
||||
LOG.info("Extra-spec {} not found {}:{}"
|
||||
.format(metadata.keys(), rsrc, action_dict),
|
||||
.format(list(metadata.keys()), rsrc, action_dict),
|
||||
extra=self.log_extra)
|
||||
|
||||
def get_flavor_resources(self, nc):
|
||||
|
@ -449,9 +449,9 @@ class ComputeSyncThread(SyncThread):
|
|||
# Extra-spec needs to be audited. Extra-spec details are
|
||||
# filled in m_resources and sc_resources during query.
|
||||
metadata = {}
|
||||
for m_key, m_value in m_flavor.attach_es.iteritems():
|
||||
for m_key, m_value in m_flavor.attach_es.items():
|
||||
found = False
|
||||
for sc_key, sc_value in sc_es_attachment.iteritems():
|
||||
for sc_key, sc_value in sc_es_attachment.items():
|
||||
if m_key == sc_key and m_value == sc_value:
|
||||
found = True
|
||||
sc_es_attachment.pop(sc_key)
|
||||
|
@ -466,7 +466,7 @@ class ComputeSyncThread(SyncThread):
|
|||
num_of_audit_jobs += 1
|
||||
|
||||
keys_to_delete = ""
|
||||
for sc_key, sc_value in sc_es_attachment.iteritems():
|
||||
for sc_key, sc_value in sc_es_attachment.items():
|
||||
keys_to_delete += sc_key + ";"
|
||||
if keys_to_delete:
|
||||
action_dict = {consts.ACTION_EXTRASPECS_DELETE: keys_to_delete}
|
||||
|
|
|
@ -97,7 +97,7 @@ class Controller(object):
|
|||
|
||||
def handle_delayed_notifications(self):
|
||||
curr_time = datetime.datetime.utcnow()
|
||||
for system, notify_time in self.system_throttle_timers.iteritems():
|
||||
for system, notify_time in self.system_throttle_timers.items():
|
||||
if notify_time is not None:
|
||||
if curr_time > notify_time:
|
||||
self.send_notification(system)
|
||||
|
|
Loading…
Reference in New Issue