lib: rename Counter::total into Counter::sum
diff --git a/src/metrics/rta_metrics.nit b/src/metrics/rta_metrics.nit
index f494736..820c2ef 100644
--- a/src/metrics/rta_metrics.nit
+++ b/src/metrics/rta_metrics.nit
@@ -35,35 +35,18 @@ private class RTAMetricsPhase
        end
 end
 
-redef class RapidTypeAnalysis
-       redef fun add_type(mtype)
-       do
-               mtype.nlvt += 1
-               mtype.mclass.nlvt += 1
-               mtype.mclass.live_types.add(mtype)
-               super(mtype)
-       end
-
-       redef fun add_cast_type(mtype)
-       do
-               mtype.nlct += 1
-               mtype.mclass.nlct += 1
-               mtype.mclass.cast_types.add(mtype)
-               super(mtype)
-       end
-end
 
 redef class MType
-       var nlvt: Int = 0
-       var nlct: Int = 0
+       private var nlvt: Int = 0
+       private var nlct: Int = 0
 
-       fun is_user_defined: Bool do
+       private fun is_standard: Bool do
                var mtype = self
                if mtype isa MNullableType then mtype = mtype.mtype
-               return self.as(MClassType).mclass.is_user_defined
+               return self.as(MClassType).mclass.is_standard
        end
 
-       fun get_depth: Int do
+       private fun get_depth: Int do
                var mtype = self
                if mtype isa MNullableType then mtype = mtype.mtype
                if not mtype isa MGenericType then return 0
@@ -77,10 +60,10 @@ redef class MType
 end
 
 redef class MClass
-       var nlvt: Int = 0
-       var nlct: Int = 0
-       var live_types: Set[MType] = new HashSet[MType]
-       var cast_types: Set[MType] = new HashSet[MType]
+       private var nlvt: Int = 0
+       private var nlct: Int = 0
+       private var live_types: Set[MType] = new HashSet[MType]
+       private var cast_types: Set[MType] = new HashSet[MType]
 end
 
 # Run a runtime type analysis and print metrics
@@ -107,8 +90,10 @@ do
        for mtype in analysis.live_types do
                mtypes.add(mtype)
                nlvt += 1
+               mtype.mclass.nlvt += 1
+               mtype.mclass.live_types.add(mtype)
                if mtype isa MGenericType then nlvtg += 1
-               if mtype.is_user_defined then
+               if not mtype.is_standard then
                        nlvtudud += 1
                        if mtype isa MGenericType then nlvtgudud += 1
                else
@@ -118,10 +103,14 @@ do
        end
 
        for mtype in analysis.live_cast_types do
+               if mtype isa MNullableType then mtype = mtype.mtype
+               if not mtype isa MClassType then continue
                mtypes.add(mtype)
                nlct += 1
+               mtype.mclass.nlct += 1
+               mtype.mclass.cast_types.add(mtype)
                if mtype isa MGenericType then nlctg += 1
-               if mtype.is_user_defined then
+               if not mtype.is_standard then
                        nlctudud += 1
                        if mtype isa MGenericType then nlctgudud += 1
                else
@@ -131,53 +120,64 @@ do
        end
 
        # CSV generation
-       if modelbuilder.toolcontext.opt_generate_csv.value then
-               var summaryCSV = new CSVDocument(modelbuilder.toolcontext.output_dir.join_path("rta_sum_metrics.csv"))
+       if modelbuilder.toolcontext.opt_csv.value then
+               var summaryCSV = new CSVDocument
                summaryCSV.set_header("scope", "NLVT", "NLVTG", "NLCT", "NLCTG")
                summaryCSV.add_line("global", nlvt, nlvtg, nlct, nlctg)
                summaryCSV.add_line("SLUD", nlvtslud, nlvtgslud, nlctslud, nlctgslud)
                summaryCSV.add_line("UDUD", nlvtudud, nlvtgudud, nlctudud, nlctgudud)
-               summaryCSV.save
+               summaryCSV.save(modelbuilder.toolcontext.output_dir.join_path("rta_sum_metrics.csv"))
 
-               var scalarCSV = new CSVDocument(modelbuilder.toolcontext.output_dir.join_path("rta_scalar_metrics.csv"))
-               var udscalarCSV = new CSVDocument(modelbuilder.toolcontext.output_dir.join_path("rta_ud_scalar_metrics.csv"))
+               var scalarCSV = new CSVDocument
+               var udscalarCSV = new CSVDocument
                scalarCSV.set_header("Type", "AGS", "DGS", "NLVT", "NLCT")
                udscalarCSV.set_header("Type", "AGS", "DGS", "NLVT", "NLCT")
 
                for mtype in mtypes do
                        var arity = 0
                        if mtype isa MGenericType then arity = mtype.arguments.length
-                       if mtype.is_user_defined then
+                       if not mtype.is_standard then
                                udscalarCSV.add_line(mtype, arity, mtype.get_depth, mtype.nlvt, mtype.nlct)
                        end
                        scalarCSV.add_line(mtype, arity, mtype.get_depth, mtype.nlvt, mtype.nlct)
                end
-               scalarCSV.save
-               udscalarCSV.save
+               scalarCSV.save(modelbuilder.toolcontext.output_dir.join_path("rta_scalar_metrics.csv"))
+               udscalarCSV.save(modelbuilder.toolcontext.output_dir.join_path("rta_ud_scalar_metrics.csv"))
 
-               scalarCSV = new CSVDocument(modelbuilder.toolcontext.output_dir.join_path("rta_scalar_class_metrics.csv"))
-               udscalarCSV = new CSVDocument(modelbuilder.toolcontext.output_dir.join_path("rta_ud_scalar_class_metrics.csv"))
+               scalarCSV = new CSVDocument
+               udscalarCSV = new CSVDocument
                scalarCSV.set_header("Class", "AGS", "NLVV", "NLVT")
                udscalarCSV.set_header("Class", "AGS", "NLVV", "inst")
 
                for mclass in modelbuilder.model.mclasses do
                        if not mclass.is_class or mclass.is_abstract then continue
-                       if mclass.is_user_defined then
+                       if not mclass.is_standard then
                                udscalarCSV.add_line(mclass.mclass_type, mclass.arity, mclass.live_types.length, mclass.nlvt)
                        end
                        scalarCSV.add_line(mclass.mclass_type, mclass.arity, mclass.live_types.length, mclass.nlvt)
                end
-               scalarCSV.save
-               udscalarCSV.save
+               scalarCSV.save(modelbuilder.toolcontext.output_dir.join_path("rta_scalar_class_metrics.csv"))
+               udscalarCSV.save(modelbuilder.toolcontext.output_dir.join_path("rta_ud_scalar_class_metrics.csv"))
        end
 
        print "--- RTA metrics ---"
+       print "Number of live runtime classes: {analysis.live_classes.length}"
+       if analysis.live_classes.length < 8 then print "\t{analysis.live_classes.join(" ")}"
        print "Number of live runtime types (instantied resolved type): {analysis.live_types.length}"
        if analysis.live_types.length < 8 then print "\t{analysis.live_types.join(" ")}"
+       print "Number of live methods: {analysis.live_methods.length}"
+       if analysis.live_methods.length < 8 then print "\t{analysis.live_methods.join(" ")}"
        print "Number of live method definitions: {analysis.live_methoddefs.length}"
        if analysis.live_methoddefs.length < 8 then print "\t{analysis.live_methoddefs.join(" ")}"
-       print "Number of live customized method definitions: {analysis.live_customized_methoddefs.length}"
-       if analysis.live_customized_methoddefs.length < 8 then print "\t{analysis.live_customized_methoddefs.join(" ")}"
        print "Number of live runtime cast types (ie used in as and isa): {analysis.live_cast_types.length}"
        if analysis.live_cast_types.length < 8 then print "\t{analysis.live_cast_types.join(" ")}"
+
+       var x = 0
+       for p in analysis.live_methods do
+               for d in p.mpropdefs do
+                       if analysis.live_methoddefs.has(d) or d.is_abstract then continue
+                       x += 1
+               end
+       end
+       print "Number of dead method definitions of live methods: {x}"
 end