# A test case is a unit test considering only a `MMethodDef`.
class TestCase
    super UnitTest

    # Test suite which `self` belongs to.
    var test_suite: TestSuite

    # Test method to be compiled and tested.
    var test_method: MMethodDef

    # Methods to execute before this test case.
    var before = new Array[MMethodDef]

    # Methods to execute after this test case.
    var after = new Array[MMethodDef]

    redef fun full_name do return test_method.full_name

    redef fun location do return test_method.location

    # `ToolContext` to use to display messages and locate the `nitc` binary.
    var toolcontext: ToolContext

    # Generate the test unit for `self` in `file`.
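    #
    # Each unit is guarded by a check on the requested test name, so one
    # invocation of the generated program executes a single case.
    # For a hypothetical test `test_foo` in a suite class `TestBar`,
    # the emitted unit looks like:
    #
    # ~~~nitish
    # if name == "test_foo" then
    #     var subject = new TestBar.intern
    #     subject.test_foo
    # end
    # ~~~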
    fun write_to_nit(file: Template) do
        var name = test_method.name
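        # Guard the unit with a check on `name` (the test name the
        # generated program receives on its command line, see `run`),
        # so one invocation executes exactly one case.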
file.addn "if name == \"{name}\" then"
if test_method.mproperty.is_toplevel then
file.addn "\t{name}"
else
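            # Instantiate a fresh subject and wrap the test call
            # between its `before` and `after` methods.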
file.addn "\tvar subject = new {test_method.mclassdef.name}.intern"
for mmethod in before do
file.addn "\tsubject.{mmethod.name}"
end
file.addn "\tsubject.{name}"
for mmethod in after do
file.addn "\tsubject.{mmethod.name}"
end
end
file.addn "end"
end
# Execute the test case.
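    #
    # Assumes the enclosing `TestSuite` already compiled the generated
    # program to `{test_file}.bin`.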
    fun run do
        toolcontext.info("Execute test-case {test_method.name}", 1)
        was_exec = true
        if toolcontext.opt_noact.value then return
        # execute
        var method_name = test_method.name
        var test_file = test_suite.test_file
        var res_name = "{test_file}_{method_name.escape_to_c}"
        var clock = new Clock
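        # Run the compiled suite with the test name as argument;
        # stdout and stderr are captured in the `.out1` file.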
        var res = toolcontext.safe_exec("{test_file}.bin {method_name} > '{res_name}.out1' 2>&1 </dev/null")
        if not toolcontext.opt_no_time.value then real_time = clock.total
        var raw_output = "{res_name}.out1".to_path.read_all
        self.raw_output = raw_output
        # set test case result
        if res != 0 then
            error = "Runtime Error in file {test_file}.nit"
            toolcontext.modelbuilder.failed_tests += 1
        else
            # No runtime error: compare with the expected output (`.res` file), if any.
            var mmodule = test_method.mclassdef.mmodule
            var file = mmodule.filepath
            if file != null then
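                # Candidate locations for the expected output
                # `{test_method.name}.res`: the `{mmodule.name}.sav/` and
                # `sav/` subdirectories, or next to the module itself.
                # A single match is used as the expected output;
                # several matches is a conflict.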
                var tries = [ file.dirname / mmodule.name + ".sav" / test_method.name + ".res",
                    file.dirname / "sav" / test_method.name + ".res",
                    file.dirname / test_method.name + ".res" ]
                var savs = [ for t in tries do if t.file_exists then t ]
                if savs.length == 1 then
                    var sav = savs.first
                    toolcontext.info("Diff output with {sav}", 1)
                    res = toolcontext.safe_exec("diff -u --label 'expected:{sav}' --label 'got:{res_name}.out1' '{sav}' '{res_name}.out1' > '{res_name}.diff' 2>&1 </dev/null")
                    if res == 0 then
                        # OK
                    else if toolcontext.opt_autosav.value then
                        raw_output.write_to_file(sav)
                        info = "Expected output updated: {sav} (--autosav)"
                    else
                        self.raw_output = "Diff\n" + "{res_name}.diff".to_path.read_all
                        error = "Difference with expected output: diff -u {sav} {res_name}.out1"
                        toolcontext.modelbuilder.failed_tests += 1
                    end
                else if savs.length > 1 then
                    toolcontext.info("Conflicting diffs: {savs.join(", ")}", 1)
                    error = "Conflicting expected output: {savs.join(", ", " and ")} all exist"
                    toolcontext.modelbuilder.failed_tests += 1
                else if not raw_output.is_empty then
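                    # No expected output is recorded: the case is not
                    # failed, but `opt_autosav` can store the produced
                    # output as the new expectation.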
toolcontext.info("No diff: {tries.join(", ", " or ")} not found", 1)
if toolcontext.opt_autosav.value then
var sav = tries.first
sav.dirname.mkdir
raw_output.write_to_file(sav)
info = "Expected output saved: {sav} (--autoupdate)"
end
end
end
end
is_done = true
end
# Make the test case fail without testing it
#
# Useful when the compilation or the before_test failed.
fun fail(message: String) do
is_done = true
error = message
toolcontext.modelbuilder.failed_tests += 1
end
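
    # The test is classified under the first two components of
    # `test_method.full_name` (`a$b$c` gives `nitunit.a.b`).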
    redef fun xml_classname do
        var a = test_method.full_name.split("$")
        return "nitunit.{a[0]}.{a[1]}"
    end
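
    # The test name is the last component of `test_method.full_name`
    # (`a$b$c` gives `c`).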
    redef fun xml_name do
        var a = test_method.full_name.split("$")
        return a[2]
    end
end
src/testing/testing_suite.nit:348,1--468,3