Python 3 compatibility: use print as a function.

In Python 3, print is a function rather than a statement.
In particular, calls that print multiple strings need
print_function imported from __future__ so that Python 2
interprets print() the same way.
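
As a minimal illustration (not part of this change) of why the
__future__ import matters when several values are printed:

    # Python 2.7 without the import:
    #   print "a", "b"      ->  a b           (print statement)
    #   print("a", "b")     ->  ('a', 'b')    (parentheses build a tuple)
    #
    # Python 2.7 with the import (and any Python 3):
    from __future__ import print_function
    print("a", "b")         # ->  a b         (print is a real function)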

Story: 2002909

Task: 24560

Signed-off-by: zhangyangyang <zhangyangyang@unionpay.com>

Change-Id: Ie31eb59368af57776eb9785dba494432111cd250
zhangyangyang 2018-08-29 17:28:31 +08:00
parent 65ef33d43f
commit fd33abe3f4
8 changed files with 76 additions and 76 deletions


@@ -78,7 +78,7 @@ def is_partitioning_correct(disk_path, partition_sizes):
output, _, _ = command(["udevadm", "settle", "-E", disk_node])
output, _, _ = command(["parted", "-s", disk_node, "print"])
if not re.search('Partition Table: gpt', output):
print "Format of disk node %s is not GPT, zapping disk" % disk_node
print("Format of disk node %s is not GPT, zapping disk" % disk_node)
return False
# Check each partition size
@@ -93,7 +93,7 @@ def is_partitioning_correct(disk_path, partition_sizes):
regex = ("^Disk " + str(partition_node) + ":\\s*" +
str(size) + "[\\.0]*MiB")
if not re.search(regex, output, re.MULTILINE):
print ("Journal partition %(node)s size is not %(size)s, "
print("Journal partition %(node)s size is not %(size)s, "
"zapping disk" % {"node": partition_node, "size": size})
return False
@@ -124,7 +124,7 @@ def create_partitions(disk_path, partition_sizes):
# Erase all partitions on current node by creating a new GPT table
_, err, ret = command(["parted", "-s", disk_node, "mktable", "gpt"])
if ret:
print ("Error erasing partition table of %(node)s\n"
print("Error erasing partition table of %(node)s\n"
"Return code: %(ret)s reason: %(reason)s" %
{"node": disk_node, "ret": ret, "reason": err})
exit(1)
@@ -146,10 +146,10 @@ def create_partitions(disk_path, partition_sizes):
"start": used_space_mib,
"end": used_space_mib + size,
"reason": err}
print ("Created partition from start=%(start)s MiB to end=%(end)s MiB"
print("Created partition from start=%(start)s MiB to end=%(end)s MiB"
" on %(disk_node)s" % parms)
if ret:
print ("Failed to create partition with "
print("Failed to create partition with "
"start=%(start)s, end=%(end)s "
"on %(disk_node)s reason: %(reason)s" % parms)
exit(1)
@@ -164,7 +164,7 @@ def create_partitions(disk_path, partition_sizes):
disk_node]
_, err, ret = command(cmd)
if ret:
print ("WARNINIG: Failed to set partition name and typecode")
print("WARNINIG: Failed to set partition name and typecode")
used_space_mib += size
num += 1
@@ -189,10 +189,10 @@ def mount_data_partition(data_path, osdid):
_, _, ret = command(cmd)
params = {"node": data_node, "path": mount_path}
if ret:
print "Failed to mount %(node)s to %(path), aborting" % params
print("Failed to mount %(node)s to %(path), aborting" % params)
exit(1)
else:
print "Mounted %(node)s to %(path)s" % params
print("Mounted %(node)s to %(path)s" % params)
return mount_path
@@ -224,9 +224,9 @@ def fix_location(mount_point, journal_path, osdid):
if os.path.lexists(path):
os.unlink(path) # delete the old symlink
os.symlink(new_target, path)
print "Symlink created: %(path)s -> %(target)s" % params
print("Symlink created: %(path)s -> %(target)s" % params)
except:
print "Failed to create symlink: %(path)s -> %(target)s" % params
print("Failed to create symlink: %(path)s -> %(target)s" % params)
exit(1)
# Fix journal_uuid
path = mount_point + "/journal_uuid"
@@ -237,7 +237,7 @@ def fix_location(mount_point, journal_path, osdid):
# The operation is noncritical, it only makes 'ceph-disk list'
# display complete output. We log and continue.
params = {"path": path, "uuid": journal_uuid}
print "WARNING: Failed to set uuid of %(path)s to %(uuid)s" % params
print("WARNING: Failed to set uuid of %(path)s to %(uuid)s" % params)
# Clean the journal partition
# even if erasing the partition table, if another journal was present here
@@ -257,10 +257,10 @@ def fix_location(mount_point, journal_path, osdid):
"ret": ret,
"reason": err}
if not ret:
print ("Prepared new journal partition: %(journal_node)s "
"for osd id: %(osdid)s") % params
print("Prepared new journal partition: %(journal_node)s "
"for osd id: %(osdid)s" % params)
else:
print ("Error initializing journal node: "
print("Error initializing journal node: "
"%(journal_node)s for osd id: %(osdid)s "
"ceph-osd return code: %(ret)s reason: %(reason)s" % params)
@@ -293,7 +293,7 @@ def main(argv):
else:
err = True
if err:
print "Command intended for internal use only"
print("Command intended for internal use only")
exit(-1)
if partitions:
@@ -302,7 +302,7 @@ def main(argv):
partitions['journals']):
create_partitions(partitions['disk_path'], partitions['journals'])
else:
print ("Partition table for %s is correct, "
print("Partition table for %s is correct, "
"no need to repartition" %
device_path_to_device_node(partitions['disk_path']))
elif location:
@@ -313,14 +313,14 @@ def main(argv):
if not is_location_correct(mount_point,
location['journal_path'],
location['osdid']):
print ("Fixing journal location for "
print("Fixing journal location for "
"OSD id: %(id)s" % {"node": location['data_path'],
"id": location['osdid']})
fix_location(mount_point,
location['journal_path'],
location['osdid'])
else:
print ("Journal location for %s is correct,"
print("Journal location for %s is correct,"
"no need to change it" % location['data_path'])
main(sys.argv[1:])


@@ -1161,23 +1161,23 @@ index 808e53a..9265cd9 100644
self._firstconflicts = True
@@ -711,6 +734,22 @@ class TextOutput(NullOutput):
name = str(prvpkg)
print " ", "%s (%s)" % (name, prv)
print(" ", "%s (%s)" % (name, prv))
+ def showRecommends(self, pkg, rec):
+ if self._firstrecommends:
+ self._firstrecommends = False
+ print " ", _("Recommends:")
+ print " ", rec
+ print(" ", _("Recommends:"))
+ print(" ", rec)
+
+ def showRecommendsProvidedBy(self, pkg, req, prv, prvpkg):
+ if self._firstrecommendsprovidedby:
+ self._firstrecommendsprovidedby = False
+ print " ", _("Provided By:")
+ print(" ", _("Provided By:"))
+ if self.opts.hide_version:
+ name = prvpkg.name
+ else:
+ name = str(prvpkg)
+ print " ", "%s (%s)" % (name, prv)
+ print(" ", "%s (%s)" % (name, prv))
+
def showUpgrades(self, pkg, upg):
if self._firstupgrades:
@@ -1189,14 +1189,14 @@ index 808e53a..9265cd9 100644
+ def showRecommends(self, pkg, req):
+ if (pkg, req) not in self._shown:
+ self._shown[pkg, req] = True
+ print ' "%s" -> "Recommends: %s";' % (pkg, req)
+ print(' "%s" -> "Recommends: %s";' % (pkg, req))
+
+ def showRecommendsProvidedBy(self, pkg, req, prv, prvpkg):
+ self.showPackage(prvpkg)
+ self.showProvides(prvpkg, prv)
+ if (req, prv) not in self._shown:
+ self._shown[req, prv] = True
+ print ' "Recommends: %s" -> "Provides: %s";' % (req, prv)
+ print(' "Recommends: %s" -> "Provides: %s";' % (req, prv))
+
def showUpgrades(self, pkg, upg):
if (pkg, upg) not in self._shown:


@@ -112,9 +112,9 @@ def main():
logger = Logger(logging.DEBUG).get_logger()
logger.info("Starting....")
logger.info("Parsed options: %s" % options)
print logger
print(logger)
buddy = BuddyInfo(logger)
print buddy
print(buddy)
if __name__ == '__main__':
main()


@@ -1218,7 +1218,7 @@ def checkDuration(duration):
return None
else:
time.sleep(duration)
print "Duration interval has ended. Killing processes now"
print("Duration interval has ended. Killing processes now")
logging.warning("Duration interval has ended. Killing processes now")
raise KeyboardInterrupt
@@ -1262,7 +1262,7 @@ def createDB(influx_info, grafana_port, grafana_api_key):
if p is not None:
p.kill()
except Exception as e:
print e.message
print(e.message)
sys.exit(0)
@@ -1277,7 +1277,7 @@ def deleteDB(influx_info, grafana_port, grafana_api_key):
if answer is None or answer == "" or answer == "y" or answer == "yes":
try:
logging.info("Removing database from InfluxDB and Grafana")
print "Removing database from InfluxDB and Grafana. Please wait..."
print("Removing database from InfluxDB and Grafana. Please wait...")
# delete database from InfluxDB
p = Popen("curl -s -XPOST 'http://'{}':'{}'/query' --data-urlencode 'q=DROP DATABASE {}'".format(influx_info[0], influx_info[1], influx_info[2]), shell=True, stdout=PIPE)
response = p.stdout.read().strip("\n")
@@ -1306,7 +1306,7 @@ def deleteDB(influx_info, grafana_port, grafana_api_key):
if p is not None:
p.kill()
except Exception as e:
print e.message
print(e.message)
sys.exit(0)
@@ -1322,7 +1322,7 @@ def appendToFile(file, content):
if __name__ == "__main__":
# make sure user is root
if os.geteuid() != 0:
print "Must be run as root!\n"
print("Must be run as root!\n")
sys.exit(0)
# initialize variables
@@ -1432,7 +1432,7 @@ if __name__ == "__main__":
else:
SERVICES[service_tuple[0]] = {'name': service_tuple[1], 'api-port': None}
except Exception:
print "An error has occurred when parsing the engtools.conf configuration file: {}".format(sys.exc_info())
print("An error has occurred when parsing the engtools.conf configuration file: {}".format(sys.exc_info()))
sys.exit(0)
syseng_services = live_svc + static_svcs
@@ -1579,7 +1579,7 @@ if __name__ == "__main__":
tasks.append(p)
p.start()
print "Sending data to InfluxDB. Please tail /tmp/livestream.log"
print("Sending data to InfluxDB. Please tail /tmp/livestream.log")
checkDuration(duration)
# give a small delay to ensure services have started


@@ -436,23 +436,23 @@ if __name__ == "__main__":
pool_size = len(controller_list) + len(compute_list) + len(storage_list)
if options.file_list is not None and options.parse_all is True:
print "You cannot use the -a option with the -f option"
print("You cannot use the -a option with the -f option")
sys.exit(0)
if options.postgres_list is not None and options.file_list is not None:
print "You cannot use the -p option with the -f option"
print("You cannot use the -p option with the -f option")
sys.exit(0)
if options.parse_all is True and options.node_list is not None:
print "You cannot use the -a option with the -n option. Ex: -n controller-0"
print("You cannot use the -a option with the -n option. Ex: -n controller-0")
sys.exit(0)
if options.file_list is not None and options.node_list is None:
print "You must specify a node and a file. Ex: -n controller-0 -f postgres-conns.csv"
print("You must specify a node and a file. Ex: -n controller-0 -f postgres-conns.csv")
sys.exit(0)
working_dir = os.getcwd()
pool = Pool(processes=pool_size)
proc_list = []
print "Sending data to InfluxDB. Please tail /tmp/csv-to-influx.log"
print("Sending data to InfluxDB. Please tail /tmp/csv-to-influx.log")
# create a process per node
if len(controller_list) > 0:


@@ -200,7 +200,7 @@ def print_disk_view(rows=None, extended=False):
if len(rows) > 0:
print
print "DISKs: (Physical disk view)"
print("DISKs: (Physical disk view)")
pt = PrettyTable(disk_lables_extended) if extended else \
PrettyTable(disk_lables_brief)
@@ -210,9 +210,9 @@ def print_disk_view(rows=None, extended=False):
if len(r) == len(pt.field_names):
pt.add_row(r)
else:
print "Disk row has incorrect number of values: %s" % r
print("Disk row has incorrect number of values: %s" % r)
print pt
print(pt)
def print_vg_view(rows=None, extended=False):
@@ -228,7 +228,7 @@ def print_vg_view(rows=None, extended=False):
if len(rows) > 0:
print
print "VOLUME GROUPS: (VG view)"
print("VOLUME GROUPS: (VG view)")
pt = PrettyTable(vg_labels_extended) if extended else \
PrettyTable(vg_labels_brief)
@@ -240,9 +240,9 @@ def print_vg_view(rows=None, extended=False):
if len(r) == len(pt.field_names):
pt.add_row(r)
else:
print "VG row has incorrect number of values: %s" % r
print("VG row has incorrect number of values: %s" % r)
print pt
print(pt)
def get_info_and_display(cc, show=None):
@@ -409,8 +409,8 @@ def main():
prog = os.path.basename(sys.argv[0])
ts = datetime.datetime.now()
if show['debug']:
print "%s: %s options: view:%s System api version: %s" \
% (prog, ts.isoformat(), show, api_version)
print("%s: %s options: view:%s System api version: %s"
% (prog, ts.isoformat(), show, api_version))
cgts_client_creds = get_system_creds()
if not cgts_client_creds['os_username']:


@@ -855,7 +855,7 @@ def print_all_tables(tenants=None,
# Print list of aggregates
if show['aggregates']:
print
print "AGGREGATES:"
print("AGGREGATES:")
pt = PrettyTable(
['Name',
'Avail Zone',
@@ -870,13 +870,13 @@ def print_all_tables(tenants=None,
", ".join([str(x) for x in A.hosts]),
str(A.metadata)
])
print pt
print(pt)
# Print list of compute host hypervisors, showing per numa details
if show['computes']:
print
print 'COMPUTE HOSTS: ' \
'Legend: U = Used, A = Avail'
print('COMPUTE HOSTS: '
'Legend: U = Used, A = Avail')
pt = PrettyTable(
['Host',
'status',
@@ -1018,7 +1018,7 @@ def print_all_tables(tenants=None,
# Print list of compute hosts topology
if show['topology']:
print
print 'LOGICAL CPU TOPOLOGY (compute hosts):'
print('LOGICAL CPU TOPOLOGY (compute hosts):')
for host_name, topology in sorted(topologies.iteritems(),
key=lambda (k, v): (natural_keys(k))):
H = hypervisors[host_name]
@@ -1042,13 +1042,13 @@ def print_all_tables(tenants=None,
n_cores = len(topology[socket_id].keys())
n_threads = len(topology[socket_id][core_id].keys())
print '%s: Model:%s, Arch:%s, Vendor:%s, ' \
'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d' \
print('%s: Model:%s, Arch:%s, Vendor:%s, '
'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d'
% (host_name,
H.cpu_info['model'],
H.cpu_info['arch'],
H.cpu_info['vendor'],
n_sockets, n_cores, n_threads, len(cpu_ids))
n_sockets, n_cores, n_threads, len(cpu_ids)))
# cpu_id row
L = ['cpu_id']
@@ -1076,13 +1076,13 @@ def print_all_tables(tenants=None,
{L.append(','.join(
str(s) for s in siblings[i]) or '-') for i in cpu_ids}
pt.add_row(L)
print pt
print(pt)
print
# Print list of compute hosts topology
if show['topology-long']:
print
print 'LOGICAL CPU TOPOLOGY (compute hosts):'
print('LOGICAL CPU TOPOLOGY (compute hosts):')
for host_name, topology in sorted(topologies.iteritems(),
key=lambda (k, v): (natural_keys(k))):
H = hypervisors[host_name]
@@ -1106,13 +1106,13 @@ def print_all_tables(tenants=None,
n_cores = len(topology[socket_id].keys())
n_threads = len(topology[socket_id][core_id].keys())
print '%s: Model:%s, Arch:%s, Vendor:%s, ' \
'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d' \
print('%s: Model:%s, Arch:%s, Vendor:%s, '
'Sockets=%d, Cores/Socket=%d, Threads/Core=%d, Logical=%d'
% (host_name,
H.cpu_info['model'],
H.cpu_info['arch'],
H.cpu_info['vendor'],
n_sockets, n_cores, n_threads, len(cpu_ids))
n_sockets, n_cores, n_threads, len(cpu_ids)))
pt = PrettyTable(
['cpu_id',
'socket_id',
@@ -1132,14 +1132,14 @@ def print_all_tables(tenants=None,
list_to_range(siblings[i]) or '-',
'0x%x' % (1 << i)
])
print pt
print(pt)
print
# Print list of servers
if show['servers']:
re_server_group = re.compile(r'^(\S+)\s+\((\S+)\)$')
print
print 'SERVERS (nova view):'
print('SERVERS (nova view):')
pt = PrettyTable(
['tenant',
'ID',
@@ -1237,8 +1237,8 @@ def print_all_tables(tenants=None,
# Print each libvirt domain info
if show['libvirt']:
print
print 'SERVERS (libvirt view): ' \
'Legend: cpulist = [pcpu[i], ...]'
print('SERVERS (libvirt view): '
'Legend: cpulist = [pcpu[i], ...]')
pt = PrettyTable(
['uuid',
'instance_name',
@@ -1278,7 +1278,7 @@ def print_all_tables(tenants=None,
# Print list of in-progress migrations
if show['migrations']:
print
print "MIGRATIONS (in progress): Legend: S=Source, D=Destination"
print("MIGRATIONS (in progress): Legend: S=Source, D=Destination")
pt = PrettyTable(
['ID',
'status',
@@ -1310,7 +1310,7 @@ def print_all_tables(tenants=None,
pp = pprint.PrettyPrinter(indent=1, width=40)
if show['flavors']:
print
print "FLAVORS (in use):"
print("FLAVORS (in use):")
pt = PrettyTable(
['id',
'name',
@@ -1342,13 +1342,13 @@ def print_all_tables(tenants=None,
F.is_public,
pp.pformat(extra_specs[F.id]),
])
print pt
print(pt)
# Print images for instances currently in use
pp = pprint.PrettyPrinter(indent=1, width=40)
if show['images']:
print
print "IMAGES (in use):"
print("IMAGES (in use):")
pt = PrettyTable(
['id',
'name',
@@ -1373,12 +1373,12 @@ def print_all_tables(tenants=None,
I.status,
I.properties,
])
print pt
print(pt)
# Print server groups for instances currently in use (exclude members data)
if show['server_groups']:
print
print "SERVER GROUPS (in use):"
print("SERVER GROUPS (in use):")
pt = PrettyTable(
['Tenant',
'Id',
@@ -1398,7 +1398,7 @@ def print_all_tables(tenants=None,
str(S.policies),
str(S.metadata),
])
print pt
print(pt)
def _get_host_id(tenant_id=None, host_name=None):
@@ -2108,12 +2108,12 @@ def get_info_and_display(show=None):
# Print out warnings if we detect mismatches between nova and libvirt
if warnings:
print
print "WARNINGS (mismatch):"
print("WARNINGS (mismatch):")
pt = PrettyTable(['Message'])
pt.align = 'l'
for W in warnings:
pt.add_row([W])
print pt
print(pt)
if True in debug.values():
logger.debug('done.')
@@ -2126,7 +2126,7 @@ def main():
try:
# Enforce 'root' access since we need to read nova.conf .
if os.geteuid() != 0:
print ('Require sudo/root.')
print('Require sudo/root.')
os.execvp('sudo', ['sudo'] + sys.argv)
# Process command line options and arguments, configure logging,
@@ -2136,7 +2136,7 @@ def main():
# Print selected options, and timestamp
prog = os.path.basename(sys.argv[0])
ts = datetime.datetime.now()
print "%s: %s options: show:%s" % (prog, ts.isoformat(), show['show'])
print("%s: %s options: show:%s" % (prog, ts.isoformat(), show['show']))
if show['volumes']:
logger.info('volumes selected: displaying will take some time')


@@ -185,7 +185,7 @@ def main():
if len(sys.argv) == 2 :
licensefile = sys.argv[1]
else:
print "Usage: verify-license <license file>"
print("Usage: verify-license <license file>")
exit(-1)
try: