nitunit: do not execute a before/after test twice
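
This rework drops the old name-based hooks (`before_test`, `after_test`, `before_module`, `after_module`) in favor of annotated methods collected per class (`before`, `after`) and per module (`before_all`, `after_all`, gathered on `Sys`). A minimal sketch of a test suite written against the new scheme; the annotation names mirror the `is_before`/`is_after`/`is_before_all`/`is_after_all` predicates queried below, but the exact surface syntax and all module/class/method names here are assumptions:

	module test_foo is test

	class TestFoo
		test

		# Run before/after each test case of this class
		fun setup is before do print "setup"
		fun teardown is after do print "teardown"

		fun test_bar is test do assert 21 * 2 == 42
	end

	# Run once around the whole suite (top level, so collected on `Sys`)
	fun prepare is before_all do print "before the suite"
	fun cleanup is after_all do print "after the suite"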
diff --git a/src/testing/testing_suite.nit b/src/testing/testing_suite.nit
index acf5650..2141824 100644
--- a/src/testing/testing_suite.nit
+++ b/src/testing/testing_suite.nit
@@ -17,12 +17,14 @@ module testing_suite
 
 import testing_base
 import html
+private import parse_annotations
+private import realtime
 
 redef class ToolContext
-       # -- target-file
-       var opt_file = new OptionString("Specify test suite location.", "-t", "--target-file")
        # --pattern
-       var opt_pattern = new OptionString("Only run test case with name that match pattern. Examples: 'TestFoo', 'TestFoo*', 'TestFoo::test_foo', 'TestFoo::test_foo*', 'test_foo', 'test_foo*'", "-p", "--pattern")
+       var opt_pattern = new OptionString("Only run test cases with a name matching the pattern", "-p", "--pattern")
+       # --autosav
+       var opt_autosav = new OptionBool("Automatically create/update .res files for black box testing", "--autosav")
 end
 
 # Used to test nitunit test files.
@@ -31,49 +33,38 @@ class NitUnitTester
        # `ModelBuilder` used to parse test files.
        var mbuilder: ModelBuilder
 
-       # Parse a file and return the contained `MModule`.
-       private fun parse_module_unit(file: String): nullable MModule do
-               var mmodule = mbuilder.parse([file]).first
-               if mbuilder.get_mmodule_annotation("test_suite", mmodule) == null then return null
-               mbuilder.run_phases
-               return mmodule
-       end
-
-       # Compile and execute the test suite for a NitUnit `file`.
-       fun test_module_unit(file: String): nullable TestSuite do
+       # Compile and execute `mmodule` as a test suite.
+       fun test_module_unit(mmodule: MModule): TestSuite do
                var toolcontext = mbuilder.toolcontext
-               var mmodule = parse_module_unit(file)
-               # is the module a test_suite?
-               if mmodule == null then return null
                var suite = new TestSuite(mmodule, toolcontext)
                # method to execute before all tests in the module
-               var before_module = mmodule.before_test
-               if before_module != null then
+               for mmethod in mmodule.before_all do
                        toolcontext.modelbuilder.total_tests += 1
-                       suite.before_module = new TestCase(suite, before_module, toolcontext)
+                       suite.before_all.add new TestCase(suite, mmethod, toolcontext)
                end
                # generate all test cases
                for mclassdef in mmodule.mclassdefs do
                        if not mclassdef.is_test then continue
                        if not suite_match_pattern(mclassdef) then continue
                        toolcontext.modelbuilder.total_classes += 1
-                       var before_test = mclassdef.before_test
-                       var after_test = mclassdef.after_test
+
+                       var before = mclassdef.before
+                       var after = mclassdef.after
+
                        for mpropdef in mclassdef.mpropdefs do
                                if not mpropdef isa MMethodDef or not mpropdef.is_test then continue
                                if not case_match_pattern(mpropdef) then continue
                                toolcontext.modelbuilder.total_tests += 1
                                var test = new TestCase(suite, mpropdef, toolcontext)
-                               test.before_test = before_test
-                               test.after_test = after_test
-                               suite.add_test test
+                               test.before = before
+                               test.after = after
+                               suite.test_cases.add test
                        end
                end
                # method to execute after all tests in the module
-               var after_module = mmodule.after_test
-               if after_module != null then
+               for mmethod in mmodule.after_all do
                        toolcontext.modelbuilder.total_tests += 1
-                       suite.after_module = new TestCase(suite, after_module, toolcontext)
+                       suite.after_all.add new TestCase(suite, mmethod, toolcontext)
                end
                suite.run
                return suite
@@ -126,33 +117,97 @@ class TestSuite
        # List of `TestCase` to be executed in this suite.
        var test_cases = new Array[TestCase]
 
-       # Add a `TestCase` to the suite.
-       fun add_test(case: TestCase) do test_cases.add case
+       # Tests to be executed before the whole test suite.
+       var before_all = new Array[TestCase]
 
-       # Test to be executed before the whole test suite.
-       var before_module: nullable TestCase = null
+       # Tests to be executed after the whole test suite.
+       var after_all = new Array[TestCase]
 
-       # Test to be executed after the whole test suite.
-       var after_module: nullable TestCase = null
+       # Display the test suite status on standard output.
+       fun show_status do
+               var test_cases = self.test_cases.to_a
+               test_cases.add_all before_all
+               test_cases.add_all after_all
+               toolcontext.show_unit_status("Test-suite of module " + mmodule.full_name, test_cases)
+       end
 
        # Execute the test suite
        fun run do
+               set_env
+               show_status
                if not toolcontext.test_dir.file_exists then
                        toolcontext.test_dir.mkdir
                end
+               write_to_nit
+               compile
+               if failure != null then
+                       for case in test_cases do
+                               case.fail "Compilation Error"
+                               case.raw_output = failure
+                               toolcontext.clear_progress_bar
+                               toolcontext.show_unit(case)
+                       end
+                       show_status
+                       print ""
+                       return
+               end
                toolcontext.info("Execute test-suite {mmodule.name}", 1)
-               var before_module = self.before_module
-               if not before_module == null then run_case(before_module)
-               for case in test_cases do run_case(case)
-               var after_module = self.after_module
-               if not after_module == null then run_case(after_module)
+
+               for before_module in before_all do
+                       before_module.run
+                       toolcontext.clear_progress_bar
+                       toolcontext.show_unit(before_module)
+                       if before_module.error != null then
+                               for case in test_cases do
+                                       case.fail "Nitunit Error: before_module test failed"
+                                       toolcontext.clear_progress_bar
+                                       toolcontext.show_unit(case)
+                               end
+                               for after_module in after_all do
+                                       after_module.fail "Nitunit Error: before_module test failed"
+                                       toolcontext.clear_progress_bar
+                                       toolcontext.show_unit(after_module)
+                               end
+                               show_status
+                               print ""
+                               return
+                       end
+               end
+
+               for case in test_cases do
+                       case.run
+                       toolcontext.clear_progress_bar
+                       toolcontext.show_unit(case)
+                       show_status
+               end
+
+               for after_module in after_all do
+                       after_module.run
+                       toolcontext.clear_progress_bar
+                       toolcontext.show_unit(after_module)
+                       show_status
+               end
+
+               show_status
+               print ""
        end
 
-       # Execute a test case
-       fun run_case(test_case: TestCase) do
-               test_case.write_to_nit
-               test_case.compile
-               test_case.run
+       # Write the test unit for `self` in a compilable nit file.
+       fun write_to_nit do
+               var file = new Template
+               file.addn "intrude import core"
+               file.addn "import {mmodule.name}\n"
+               file.addn "var name = args.first"
+               for before_module in before_all do
+                       before_module.write_to_nit(file)
+               end
+               for case in test_cases do
+                       case.write_to_nit(file)
+               end
+               for after_module in after_all do
+                       after_module.write_to_nit(file)
+               end
+               file.write_to_file("{test_file}.nit")
        end
 
        # Return the test suite in XML format compatible with Jenkins.
@@ -163,10 +218,49 @@ class TestSuite
                for test in test_cases do n.add test.to_xml
                return n
        end
+
+       # Generated test file name.
+       fun test_file: String do
+               return toolcontext.test_dir / "gen_{mmodule.name.escape_to_c}"
+       end
+
+       # Compile all `test_cases` into a single file.
+       fun compile do
+               # find nitc
+               var nitc = toolcontext.find_nitc
+               # compile test suite
+               var file = test_file
+               var module_file = mmodule.location.file
+               if module_file == null then
+                       toolcontext.error(null, "Error: cannot find module file for {mmodule.name}.")
+                       toolcontext.check_errors
+                       return
+               end
+               var include_dir = module_file.filename.dirname
+               var cmd = "{nitc} --no-color -q '{file}.nit' -I {include_dir} -o '{file}.bin' > '{file}.out' 2>&1 </dev/null"
+               var res = toolcontext.safe_exec(cmd)
+               var f = new FileReader.open("{file}.out")
+               var msg = f.read_all
+               f.close
+               if res != 0 then
+                       failure = msg
+               end
+       end
+
+       # Set environment variables for test suite execution
+       fun set_env do
+               var loc = mmodule.location.file
+               if loc == null then return
+               toolcontext.set_testing_path(loc.filename)
+       end
+
+       # Error occurred during test-suite compilation.
+       var failure: nullable String = null
 end
 
 # A test case is a unit test considering only a `MMethodDef`.
 class TestCase
+       super UnitTest
 
        # Test suite to which `self` belongs.
        var test_suite: TestSuite
@@ -174,66 +268,36 @@ class TestCase
        # Test method to be compiled and tested.
        var test_method: MMethodDef
 
-       # `ToolContext` to use to display messages and find `nitg` bin.
-       var toolcontext: ToolContext
+       # Methods to execute before this test case
+       var before = new Array[MMethodDef]
 
-       # `MMethodDef` to call before the test case.
-       var before_test: nullable MMethodDef = null
+       # Methods to execute after this test case
+       var after = new Array[MMethodDef]
 
-       # `MMethodDef` to call after the test case.
-       var after_test: nullable MMethodDef = null
+       redef fun full_name do return test_method.full_name
 
-       # Generated test file name.
-       fun test_file: String do
-               var dir = toolcontext.test_dir
-               var mod = test_method.mclassdef.mmodule.name
-               var cls = test_method.mclassdef.name
-               var name = test_method.name
-               return "{dir}/{mod}_{cls}_{name}"
-       end
+       redef fun location do return test_method.location
 
-       # Generate the test unit in a nit file.
-       fun write_to_nit do
+       # `ToolContext` to use to display messages and find `nitc` bin.
+       var toolcontext: ToolContext
+
+       # Generate the test unit for `self` in `file`.
+       fun write_to_nit(file: Template) do
                var name = test_method.name
-               var file = new Template
-               file.addn "intrude import test_suite"
-               file.addn "import {test_method.mclassdef.mmodule.name}\n"
+               file.addn "if name == \"{name}\" then"
                if test_method.mproperty.is_toplevel then
-                       file.addn name
+                       file.addn "\t{name}"
                else
-                       file.addn "var subject = new {test_method.mclassdef.name}.nitunit"
-                       if before_test != null then file.addn "subject.{before_test.name}"
-                       file.addn "subject.{name}"
-                       if after_test != null then file.addn "subject.{after_test.name}"
-               end
-               file.write_to_file("{test_file}.nit")
-       end
-
-       # Compile all test cases in once.
-       fun compile do
-               # find nitg
-               var nit_dir = toolcontext.nit_dir
-               var nitg = "{nit_dir or else ""}/bin/nitg"
-               if nit_dir == null or not nitg.file_exists then
-                       toolcontext.error(null, "Cannot find nitg. Set envvar NIT_DIR.")
-                       toolcontext.check_errors
-               end
-               # compile test suite
-               var file = test_file
-               var include_dir = test_method.mclassdef.mmodule.location.file.filename.dirname
-               var cmd = "{nitg} --no-color '{file}.nit' -I {include_dir} -o '{file}.bin' > '{file}.out' 2>&1 </dev/null"
-               var res = sys.system(cmd)
-               var f = new IFStream.open("{file}.out")
-               var msg = f.read_all
-               f.close
-               # set test case result
-               var loc = test_method.location
-               if res != 0 then
-                       failure = msg
-                       toolcontext.warning(loc, "FAILURE: {test_method.name} (in file {file}.nit): {msg}")
-                       toolcontext.modelbuilder.failed_tests += 1
+                       file.addn "\tvar subject = new {test_method.mclassdef.name}.intern"
+                       for mmethod in before do
+                               file.addn "\tsubject.{mmethod.name}"
+                       end
+                       file.addn "\tsubject.{name}"
+                       for mmethod in after do
+                               file.addn "\tsubject.{mmethod.name}"
+                       end
                end
-               toolcontext.check_errors
+               file.addn "end"
        end
 
        # Execute the test case.
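
For reference, the block emitted by the new `TestCase.write_to_nit` above looks like this for a non-toplevel case; the class, test and hook names are illustrative, with `setup` standing for the methods collected in `before` and `teardown` for those in `after`:

	if name == "test_bar" then
		var subject = new TestFoo.intern
		subject.setup
		subject.test_bar
		subject.teardown
	end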
@@ -242,155 +306,196 @@ class TestCase
                was_exec = true
                if toolcontext.opt_noact.value then return
                # execute
-               var file = test_file
-               var res = sys.system("./{file}.bin > '{file}.out1' 2>&1 </dev/null")
-               var f = new IFStream.open("{file}.out1")
-               var msg = f.read_all
-               f.close
+               var method_name = test_method.name
+               var test_file = test_suite.test_file
+               var res_name = "{test_file}_{method_name.escape_to_c}"
+               var clock = new Clock
+               var res = toolcontext.safe_exec("{test_file}.bin {method_name} > '{res_name}.out1' 2>&1 </dev/null")
+               if not toolcontext.opt_no_time.value then real_time = clock.total
+
+               var raw_output = "{res_name}.out1".to_path.read_all
+               self.raw_output = raw_output
                # set test case result
-               var loc = test_method.location
                if res != 0 then
-                       error = msg
-                       toolcontext.warning(loc, "ERROR: {test_method.name} (in file {file}.nit): {msg}")
+                       error = "Runtime Error in file {test_file}.nit"
                        toolcontext.modelbuilder.failed_tests += 1
+               else
+                       # no error, check with res file, if any.
+                       var mmodule = test_method.mclassdef.mmodule
+                       var file = mmodule.filepath
+                       if file != null then
+                               var tries = [ file.dirname / mmodule.name + ".sav" / test_method.name + ".res",
+                                       file.dirname / "sav" / test_method.name + ".res" ,
+                                       file.dirname / test_method.name + ".res" ]
+                               var savs = [ for t in tries do if t.file_exists then t ]
+                               if savs.length == 1 then
+                                       var sav = savs.first
+                                       toolcontext.info("Diff output with {sav}", 1)
+                                       res = toolcontext.safe_exec("diff -u --label 'expected:{sav}' --label 'got:{res_name}.out1' '{sav}' '{res_name}.out1' > '{res_name}.diff' 2>&1 </dev/null")
+                                       if res == 0 then
+                                               # OK
+                                       else if toolcontext.opt_autosav.value then
+                                               raw_output.write_to_file(sav)
+                                               info = "Expected output updated: {sav} (--autosav)"
+                                       else
+                                               self.raw_output = "Diff\n" + "{res_name}.diff".to_path.read_all
+                                               error = "Difference with expected output: diff -u {sav} {res_name}.out1"
+                                               toolcontext.modelbuilder.failed_tests += 1
+                                       end
+                               else if savs.length > 1 then
+                                       toolcontext.info("Conflicting diffs: {savs.join(", ")}", 1)
+                                       error = "Conflicting expected output: {savs.join(", ", " and ")} all exist"
+                                       toolcontext.modelbuilder.failed_tests += 1
+                               else if not raw_output.is_empty then
+                                       toolcontext.info("No diff: {tries.join(", ", " or ")} not found", 1)
+                                       if toolcontext.opt_autosav.value then
+                                               var sav = tries.first
+                                               sav.dirname.mkdir
+                                               raw_output.write_to_file(sav)
+                                               info = "Expected output saved: {sav} (--autosav)"
+                                       end
+                               end
+                       end
                end
-               toolcontext.check_errors
+               is_done = true
        end
 
-       # Error occured during execution.
-       var error: nullable String = null
+       # Make the test case fail without testing it
+       #
+       # Useful when the compilation or a `before_all` test failed.
+       fun fail(message: String) do
+               is_done = true
+               error = message
+               toolcontext.modelbuilder.failed_tests += 1
+       end
 
-       # Error occured during compilation.
-       var failure: nullable String = null
+       redef fun xml_classname do
+               var a = test_method.full_name.split("$")
+               return "nitunit.{a[0]}.{a[1]}"
+       end
 
-       # Was the test case executed at least one?
-       var was_exec = false
+       redef fun xml_name do
+               var a = test_method.full_name.split("$")
+               return a[2]
+       end
+end
 
-       # Return the `TestCase` in XML format compatible with Jenkins.
-       fun to_xml: HTMLTag do
-               var mclassdef = test_method.mclassdef
-               var tc = new HTMLTag("testcase")
-               # NOTE: jenkins expects a '.' in the classname attr
-               tc.attr("classname", mclassdef.mmodule.full_name + "." + mclassdef.mclass.full_name)
-               tc.attr("name", test_method.mproperty.full_name)
-               if was_exec then
-                       tc.add  new HTMLTag("system-err")
-                       var n = new HTMLTag("system-out")
-                       n.append "out"
-                       tc.add n
-                       if error != null then
-                               n = new HTMLTag("error")
-                               n.attr("message", error.to_s)
-                               tc.add n
-                       end
-                       if failure != null then
-                               n = new HTMLTag("failure")
-                               n.attr("message", failure.to_s)
-                               tc.add n
+redef class MClassDef
+       # Methods tagged with `before` in this class definition
+       private fun before: Array[MMethodDef] do
+               var res = new ArraySet[MMethodDef]
+               for mpropdef in mpropdefs do
+                       if mpropdef isa MMethodDef and mpropdef.is_before then
+                               res.add mpropdef
                        end
                end
-               return tc
+               var in_hierarchy = self.in_hierarchy
+               if in_hierarchy == null then return res.to_a
+               for mclassdef in in_hierarchy.direct_greaters do
+                       res.add_all mclassdef.before
+               end
+               return res.to_a
        end
-end
-
-redef class MMethodDef
-       # TODO use annotations?
-
-       # Is the method a test_method?
-       # i.e. begins with "test_"
-       private fun is_test: Bool do return name.has_prefix("test_")
-
-       # Is the method a "before_test"?
-       private fun is_before: Bool do return name == "before_test"
-
-       # Is the method a "after_test"?
-       private fun is_after: Bool do return name == "after_test"
-
-       # Is the method a "before_module"?
-       private fun is_before_module: Bool do return mproperty.is_toplevel and name == "before_module"
 
-       # Is the method a "after_module"?
-       private fun is_after_module: Bool do return mproperty.is_toplevel and name == "after_module"
-end
-
-redef class MClassDef
-       # Is the class a TestClass?
-       # i.e. begins with "Test"
-       private fun is_test: Bool do
-               for sup in in_hierarchy.greaters do
-                       if sup.name == "TestSuite" then return true
+       # Methods tagged with `before_all` in this class definition
+       private fun before_all: Array[MMethodDef] do
+               var res = new ArraySet[MMethodDef]
+               for mpropdef in mpropdefs do
+                       if mpropdef isa MMethodDef and mpropdef.is_before_all then
+                               res.add mpropdef
+                       end
                end
-               return false
+               var in_hierarchy = self.in_hierarchy
+               if in_hierarchy == null then return res.to_a
+               for mclassdef in in_hierarchy.direct_greaters do
+                       res.add_all mclassdef.before_all
+               end
+               return res.to_a
        end
 
-       # "before_test" method for this classdef.
-       private fun before_test: nullable MMethodDef do
+       # Methods tagged with `after` in this class definition
+       private fun after: Array[MMethodDef] do
+               var res = new ArraySet[MMethodDef]
                for mpropdef in mpropdefs do
-                       if mpropdef isa MMethodDef and mpropdef.is_before then return mpropdef
+                       if mpropdef isa MMethodDef and mpropdef.is_after then
+                               res.add mpropdef
+                       end
+               end
+               var in_hierarchy = self.in_hierarchy
+               if in_hierarchy == null then return res.to_a
+               for mclassdef in in_hierarchy.direct_greaters do
+                       res.add_all mclassdef.after
                end
-               return null
+               return res.to_a
        end
 
-       # "after_test" method for this classdef.
-       private fun after_test: nullable MMethodDef do
+       # Methods tagged with `after_all` in this class definition
+       private fun after_all: Array[MMethodDef] do
+               var res = new ArraySet[MMethodDef]
                for mpropdef in mpropdefs do
-                       if mpropdef isa MMethodDef and mpropdef.is_after then return mpropdef
+                       if mpropdef isa MMethodDef and mpropdef.is_after_all then
+                               res.add mpropdef
+                       end
+               end
+               var in_hierarchy = self.in_hierarchy
+               if in_hierarchy == null then return res.to_a
+               for mclassdef in in_hierarchy.direct_greaters do
+                       res.add_all mclassdef.after_all
                end
-               return null
+               return res.to_a
        end
 end
 
 redef class MModule
-       # "before_module" method for this module.
-       private fun before_test: nullable MMethodDef do
-               for mclassdef in mclassdefs do
-                       if not mclassdef.name == "Object" then continue
-                       for mpropdef in mclassdef.mpropdefs do
-                               if mpropdef isa MMethodDef and mpropdef.is_before_module then return mpropdef
+       # Methods tagged with `before_all` at the module level (in `Sys`)
+       private fun before_all: Array[MMethodDef] do
+               var res = new Array[MMethodDef]
+               for mmodule in in_importation.greaters do
+                       for mclassdef in mmodule.mclassdefs do
+                               if mclassdef.name != "Sys" then continue
+                               for mpropdef in mclassdef.mpropdefs do
+                                       if not mpropdef isa MMethodDef or not mpropdef.is_before_all then continue
+                                       res.add mpropdef
+                               end
                        end
                end
-               return null
+               return res
        end
 
-       # "after_module" method for this module.
-       private fun after_test: nullable MMethodDef do
-               for mclassdef in mclassdefs do
-                       if not mclassdef.name == "Object" then continue
-                       for mpropdef in mclassdef.mpropdefs do
-                               if mpropdef isa MMethodDef and mpropdef.is_after_module then return mpropdef
+       # Methods tagged with `after_all` at the module level (in `Sys`)
+       private fun after_all: Array[MMethodDef] do
+               var res = new Array[MMethodDef]
+               for mmodule in in_importation.greaters do
+                       for mclassdef in mmodule.mclassdefs do
+                               if mclassdef.name != "Sys" then continue
+                               for mpropdef in mclassdef.mpropdefs do
+                                       if not mpropdef isa MMethodDef or not mpropdef.is_after_all then continue
+                                       res.add mpropdef
+                               end
                        end
                end
-               return null
+               return res
        end
 end
 
 redef class ModelBuilder
+       # Number of test classes generated.
        var total_classes = 0
+
+       # Number of tests generated.
        var total_tests = 0
+
+       # Number of failed tests.
        var failed_tests = 0
 
-       # Run NitUnit test file for mmodule (if exists).
-       fun test_unit(mmodule: MModule): HTMLTag do
-               var ts = new HTMLTag("testsuite")
-               toolcontext.info("nitunit: test-suite test_{mmodule}", 2)
-               var f = toolcontext.opt_file.value
-               var test_file = "test_{mmodule.name}.nit"
-               if f != null then
-                       test_file = f
-               else if not test_file.file_exists then
-                       var include_dir = mmodule.location.file.filename.dirname
-                       test_file = "{include_dir}/{test_file}"
-               end
-               if not test_file.file_exists then
-                       toolcontext.info("Skip test for {mmodule}, no file {test_file} found", 1)
-                       return ts
-               end
+       # Run the NitUnit test suite for `mmodule` (if it is a test suite).
+       fun test_unit(mmodule: MModule): nullable HTMLTag do
+               # is the module a test_suite?
+               if not mmodule.is_test then return null
+               toolcontext.info("nitunit: test-suite {mmodule}", 2)
+
                var tester = new NitUnitTester(self)
-               var res = tester.test_module_unit(test_file)
-               if res == null then
-                       toolcontext.info("Skip test for {mmodule}, no test suite found", 1)
-                       return ts
-               end
+               var res = tester.test_module_unit(mmodule)
                return res.to_xml
        end
 end
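
Putting the two `write_to_nit` methods together, nitunit now generates a single driver per test suite, `gen_<module>.nit`, with roughly this shape (module name illustrative):

	intrude import core
	import foo

	var name = args.first
	# ... followed by one `if name == "..." then ... end` block per test
	# case, as sketched above, including blocks for the top-level
	# `before_all`/`after_all` methods ...

`TestSuite.run` compiles this file once and then invokes `gen_foo.bin <test_name>` in a separate process for each selected case, so the class-level `before`/`after` hooks execute exactly once per test case and the module-level `before_all`/`after_all` cases exactly once per suite.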