Skip to content

Commit fa27f5d

Browse files
cclauss authored and tseaver committed
Spanner benchmarks: print() is a function in Python 3 (#5862)
1 parent 90f3d0b commit fa27f5d

File tree

2 files changed

+25
-25
lines changed

2 files changed

+25
-25
lines changed

bigquery/benchmark/benchmark.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,5 +42,5 @@
4242
raise Exception('found {0} columsn, expected {1}'.format(len(row), num_cols))
4343
num_rows += 1
4444
total_time = datetime.now() - start_time
45-
print "query {0}: {1} rows, {2} cols, first byte {3} sec, total {4} sec"\
46-
.format(query, num_rows, num_cols, first_byte_time.total_seconds(), total_time.total_seconds())
45+
print("query {0}: {1} rows, {2} cols, first byte {3} sec, total {4} sec"
46+
.format(query, num_rows, num_cols, first_byte_time.total_seconds(), total_time.total_seconds()))

spanner/benchmark/ycsb.py

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -147,38 +147,38 @@ def aggregate_metrics(latencies_ms, duration_ms, num_bucket):
147147
latency in latencies_ms.iteritems()}
148148
overall_op_count = sum([op_count for op_count in op_counts.itervalues()])
149149

150-
print '[OVERALL], RunTime(ms), %f' % duration_ms
151-
print '[OVERALL], Throughput(ops/sec), %f' % (float(overall_op_count) /
152-
duration_ms * 1000.0)
150+
print('[OVERALL], RunTime(ms), %f' % duration_ms)
151+
print('[OVERALL], Throughput(ops/sec), %f' % (float(overall_op_count) /
152+
duration_ms * 1000.0))
153153

154154
for operation in op_counts.keys():
155155
operation_upper = operation.upper()
156-
print '[%s], Operations, %d' % (operation_upper, op_counts[operation])
157-
print '[%s], AverageLatency(us), %f' % (
158-
operation_upper, numpy.average(latencies_ms[operation]) * 1000.0)
159-
print '[%s], LatencyVariance(us), %f' % (
160-
operation_upper, numpy.var(latencies_ms[operation]) * 1000.0)
161-
print '[%s], MinLatency(us), %f' % (
162-
operation_upper, min(latencies_ms[operation]) * 1000.0)
163-
print '[%s], MaxLatency(us), %f' % (
164-
operation_upper, max(latencies_ms[operation]) * 1000.0)
165-
print '[%s], 95thPercentileLatency(us), %f' % (
156+
print('[%s], Operations, %d' % (operation_upper, op_counts[operation]))
157+
print('[%s], AverageLatency(us), %f' % (
158+
operation_upper, numpy.average(latencies_ms[operation]) * 1000.0))
159+
print('[%s], LatencyVariance(us), %f' % (
160+
operation_upper, numpy.var(latencies_ms[operation]) * 1000.0))
161+
print('[%s], MinLatency(us), %f' % (
162+
operation_upper, min(latencies_ms[operation]) * 1000.0))
163+
print('[%s], MaxLatency(us), %f' % (
164+
operation_upper, max(latencies_ms[operation]) * 1000.0))
165+
print('[%s], 95thPercentileLatency(us), %f' % (
166166
operation_upper,
167-
numpy.percentile(latencies_ms[operation], 95.0) * 1000.0)
168-
print '[%s], 99thPercentileLatency(us), %f' % (
167+
numpy.percentile(latencies_ms[operation], 95.0) * 1000.0))
168+
print('[%s], 99thPercentileLatency(us), %f' % (
169169
operation_upper,
170-
numpy.percentile(latencies_ms[operation], 99.0) * 1000.0)
171-
print '[%s], 99.9thPercentileLatency(us), %f' % (
170+
numpy.percentile(latencies_ms[operation], 99.0) * 1000.0))
171+
print('[%s], 99.9thPercentileLatency(us), %f' % (
172172
operation_upper,
173-
numpy.percentile(latencies_ms[operation], 99.9) * 1000.0)
174-
print '[%s], Return=OK, %d' % (operation_upper, op_counts[operation])
173+
numpy.percentile(latencies_ms[operation], 99.9) * 1000.0))
174+
print('[%s], Return=OK, %d' % (operation_upper, op_counts[operation]))
175175
latency_array = numpy.array(latencies_ms[operation])
176176
for j in range(num_bucket):
177-
print '[%s], %d, %d' % (
177+
print('[%s], %d, %d' % (
178178
operation_upper, j,
179-
((j <= latency_array) & (latency_array < (j + 1))).sum())
180-
print '[%s], >%d, %d' % (operation_upper, num_bucket,
181-
(num_bucket <= latency_array).sum())
179+
((j <= latency_array) & (latency_array < (j + 1))).sum()))
180+
print('[%s], >%d, %d' % (operation_upper, num_bucket,
181+
(num_bucket <= latency_array).sum()))
182182

183183

184184
class WorkloadThread(threading.Thread):

0 commit comments

Comments (0)