private import annotation
# Tool options for nitunit test-suite runs.
#
# NOTE(review): this span was a diff hunk with embedded `-`/`+` markers;
# resolved here to its post-patch form (the removed `--target-file`
# option is dropped, the new `--autosav` option is kept).
redef class ToolContext
	# --pattern
	#
	# Restrict execution to test cases whose name matches the pattern.
	var opt_pattern = new OptionString("Only run test case with name that match pattern", "-p", "--pattern")

	# --autosav
	#
	# When set, missing or outdated `.res` expected-output files for
	# black box testing are created/updated instead of reporting a failure.
	var opt_autosav = new OptionBool("Automatically create/update .res files for black box testing", "--autosav")
end
# Used to test nitunit test files.
# `ModelBuilder` used to parse test files.
var mbuilder: ModelBuilder
- # Parse a file and return the contained `MModule`.
- private fun parse_module_unit(file: String): nullable MModule do
- var mmodule = mbuilder.parse([file]).first
- if mbuilder.get_mmodule_annotation("test_suite", mmodule) == null then return null
- mbuilder.run_phases
- return mmodule
- end
-
- # Compile and execute the test suite for a NitUnit `file`.
- fun test_module_unit(file: String): nullable TestSuite do
+ # Compile and execute `mmodule` as a test suite.
+ fun test_module_unit(mmodule: MModule): TestSuite do
var toolcontext = mbuilder.toolcontext
- var mmodule = parse_module_unit(file)
- # is the module a test_suite?
- if mmodule == null then return null
var suite = new TestSuite(mmodule, toolcontext)
# method to execute before all tests in the module
var before_module = mmodule.before_test
return
end
var include_dir = module_file.filename.dirname
- var cmd = "{nitc} --no-color '{file}.nit' -I {include_dir} -o '{file}.bin' > '{file}.out' 2>&1 </dev/null"
+ var cmd = "{nitc} --no-color -q '{file}.nit' -I {include_dir} -o '{file}.bin' > '{file}.out' 2>&1 </dev/null"
var res = toolcontext.safe_exec(cmd)
var f = new FileReader.open("{file}.out")
var msg = f.read_all
var test_file = test_suite.test_file
var res_name = "{test_file}_{method_name.escape_to_c}"
var res = toolcontext.safe_exec("{test_file}.bin {method_name} > '{res_name}.out1' 2>&1 </dev/null")
- self.raw_output = "{res_name}.out1".to_path.read_all
+ var raw_output = "{res_name}.out1".to_path.read_all
+ self.raw_output = raw_output
# set test case result
if res != 0 then
error = "Runtime Error in file {test_file}.nit"
var mmodule = test_method.mclassdef.mmodule
var file = mmodule.filepath
if file != null then
- var sav = file.dirname / mmodule.name + ".sav" / test_method.name + ".res"
- if sav.file_exists then
+ var tries = [ file.dirname / mmodule.name + ".sav" / test_method.name + ".res",
+ file.dirname / "sav" / test_method.name + ".res" ,
+ file.dirname / test_method.name + ".res" ]
+ var savs = [ for t in tries do if t.file_exists then t ]
+ if savs.length == 1 then
+ var sav = savs.first
toolcontext.info("Diff output with {sav}", 1)
res = toolcontext.safe_exec("diff -u --label 'expected:{sav}' --label 'got:{res_name}.out1' '{sav}' '{res_name}.out1' > '{res_name}.diff' 2>&1 </dev/null")
- if res != 0 then
+ if res == 0 then
+ # OK
+ else if toolcontext.opt_autosav.value then
+ raw_output.write_to_file(sav)
+ info = "Expected output updated: {sav} (--autosav)"
+ else
self.raw_output = "Diff\n" + "{res_name}.diff".to_path.read_all
error = "Difference with expected output: diff -u {sav} {res_name}.out1"
toolcontext.modelbuilder.failed_tests += 1
end
+ else if savs.length > 1 then
+ toolcontext.info("Conflicting diffs: {savs.join(", ")}", 1)
+ error = "Conflicting expected output: {savs.join(", ", " and ")} all exist"
+ toolcontext.modelbuilder.failed_tests += 1
else if not raw_output.is_empty then
- toolcontext.info("No diff: {sav} not found", 2)
+ toolcontext.info("No diff: {tries.join(", ", " or ")} not found", 1)
+ if toolcontext.opt_autosav.value then
+ var sav = tries.first
+ sav.dirname.mkdir
+ raw_output.write_to_file(sav)
+ info = "Expected output saved: {sav} (--autosav)"
+ end
end
end
end
redef class MClassDef
# Is the class a TestClass?
- # i.e. begins with "Test"
+ # i.e. is a subclass of `TestSuite`
private fun is_test: Bool do
var in_hierarchy = self.in_hierarchy
if in_hierarchy == null then return false
# Number of failed tests.
var failed_tests = 0
- # Run NitUnit test file for mmodule (if exists).
- fun test_unit(mmodule: MModule): HTMLTag do
- var ts = new HTMLTag("testsuite")
- toolcontext.info("nitunit: test-suite test_{mmodule}", 2)
- var f = toolcontext.opt_file.value
- var test_file = "test_{mmodule.name}.nit"
- if f != null then
- test_file = f
- else if not test_file.file_exists then
- var module_file = mmodule.location.file
- if module_file == null then
- toolcontext.info("Skip test for {mmodule}, no file found", 2)
- return ts
- end
- var include_dir = module_file.filename.dirname
- test_file = "{include_dir}/{test_file}"
- end
- if not test_file.file_exists then
- toolcontext.info("Skip test for {mmodule}, no file {test_file} found", 2)
- return ts
- end
+ # Run NitUnit test suite for `mmodule` (if it is one).
+ fun test_unit(mmodule: MModule): nullable HTMLTag do
+ # is the module a test_suite?
+ if get_mmodule_annotation("test_suite", mmodule) == null then return null
+ toolcontext.info("nitunit: test-suite {mmodule}", 2)
+
var tester = new NitUnitTester(self)
- var res = tester.test_module_unit(test_file)
- if res == null then
- toolcontext.info("Skip test for {mmodule}, no test suite found", 2)
- return ts
- end
+ var res = tester.test_module_unit(mmodule)
return res.to_xml
end
end