single performance file open; only access times variables if tests ran
slefrancois committed Oct 7, 2016
commit f14107d31d5cb05f192129a95d3f272acf4dbc09
code/test.py: 48 changes (23 additions, 25 deletions)
@@ -152,19 +152,27 @@ def do_tests():
                          saveto='')
         return numpy.asarray(l)
 
+    # Initialize test count and results dictionary
+    test_total = 0
+    times_dic = {}
+
     #test in float64 in FAST_RUN mode on the cpu
     import theano
     if do_float64:
         theano.config.floatX = 'float64'
         theano.config.mode = 'FAST_RUN'
         float64_times = do_tests()
+        times_dic['float64'] = float64_times
+        test_total += numpy.size(float64_times)
         print(algo_executed, file=sys.stderr)
         print('float64 times', float64_times, file=sys.stderr)
 
     #test in float32 in FAST_RUN mode on the cpu
     theano.config.floatX = 'float32'
     if do_float32:
         float32_times = do_tests()
+        times_dic['float32'] = float32_times
+        test_total += numpy.size(float32_times)
         print(algo_executed, file=sys.stderr)
         print('float32 times', float32_times, file=sys.stderr)
 
@@ -186,6 +194,8 @@ def do_tests():
     if do_gpu:
         theano.sandbox.cuda.use('gpu')
         gpu_times = do_tests()
+        times_dic['gpu'] = gpu_times
+        test_total += numpy.size(gpu_times)
         print(algo_executed, file=sys.stderr)
         print('gpu times', gpu_times, file=sys.stderr)
 
@@ -213,30 +223,18 @@ def do_tests():
     if do_float32 and do_gpu:
         print('float32/gpu', float32_times / gpu_times, file=sys.stderr)
 
-    # Write JUnit xml for speed test performance report
-
-    speed_file = 'speedtests_time.xml'
-
-    # Define speed test file write method
-    def write_junit(filename, algos, times, label):
-        with open(filename, 'a') as f:
-            for algo, time in zip(algos, times):
-                f.write(' <testcase classname="{label}" name="{algo}" time="{time}">'
-                        .format(label=label, algo=algo, time=time))
-                f.write(' </testcase>\n')
-
-    test_total = numpy.size(float64_times) \
-        + numpy.size(float32_times) \
-        + numpy.size(gpu_times)
-
-    with open(speed_file, 'w') as f:
+    # Generate JUnit performance report
+    # Define speedtest file write method
+    def write_junit(f, algos, times, label):
+        for algo, time in zip(algos, times):
+            f.write(' <testcase classname="{label}" name="{algo}" time="{time}">'
+                    .format(label=label, algo=algo, time=time))
+            f.write(' </testcase>\n')
+
+    with open('speedtests_time.xml', 'w') as f:
         f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
-        f.write('<testsuite name="theano_speedtests" tests="{ntests}">\n'
-                .format(ntests=numpy.size(test_total)))
-
-    write_junit(speed_file, algo_executed, float64_times, label='float64')
-    write_junit(speed_file, algo_executed, float32_times, label='float32')
-    write_junit(speed_file, algo_executed, gpu_times, label='gpu')
-
-    with open(speed_file, 'a') as f:
+        f.write('<testsuite name="dlt_speedtests" tests="{ntests}">\n'
+                .format(ntests=test_total))
+        for label, times in times_dic.items():
+            write_junit(f, algo_executed, times, label)
         f.write('</testsuite>\n')
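
For reference, below is a minimal, self-contained sketch of the reporting pattern this commit moves to: timing results are accumulated into times_dic only for configurations that actually ran, test_total is incremented accordingly, and the JUnit report is written during a single open of speedtests_time.xml. The flag values, algorithm names, and timings in the sketch are hypothetical placeholders, not taken from the repository.

# Standalone sketch of the commit's reporting pattern.
# Algorithm names, flags, and timings below are hypothetical, for illustration only.
import numpy

algo_executed = ['logistic_sgd', 'mlp', 'convolutional_mlp']  # hypothetical names
do_float64, do_float32, do_gpu = True, True, False            # hypothetical flags

test_total = 0
times_dic = {}

if do_float64:
    float64_times = numpy.asarray([10.2, 35.7, 120.4])  # hypothetical timings
    times_dic['float64'] = float64_times
    test_total += numpy.size(float64_times)

if do_float32:
    float32_times = numpy.asarray([8.9, 30.1, 98.6])    # hypothetical timings
    times_dic['float32'] = float32_times
    test_total += numpy.size(float32_times)

# gpu_times is never created or read because do_gpu is False.

def write_junit(f, algos, times, label):
    # Write one <testcase> element per (algorithm, time) pair.
    for algo, time in zip(algos, times):
        f.write(' <testcase classname="{label}" name="{algo}" time="{time}">'
                .format(label=label, algo=algo, time=time))
        f.write(' </testcase>\n')

# Single open: header, one section per configuration that ran, then the footer.
with open('speedtests_time.xml', 'w') as f:
    f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    f.write('<testsuite name="dlt_speedtests" tests="{ntests}">\n'
            .format(ntests=test_total))
    for label, times in times_dic.items():
        write_junit(f, algo_executed, times, label)
    f.write('</testsuite>\n')

Because a configuration that did not run never adds an entry to times_dic, the report loop cannot touch an undefined variable such as gpu_times, which is the correction described in the commit message.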