X-Git-Url: http://nitlanguage.org

diff --git a/src/metrics/metrics_base.nit b/src/metrics/metrics_base.nit
index 647f1a1..a7ded94 100644
--- a/src/metrics/metrics_base.nit
+++ b/src/metrics/metrics_base.nit
@@ -19,8 +19,10 @@ module metrics_base
 
 import model_utils
+import modelbuilder
 import csv
 import counter
+import console
 
 redef class ToolContext
 
@@ -29,6 +31,10 @@ redef class ToolContext
 	# --mmodules
 	var opt_mmodules = new OptionBool("Compute metrics about mmodules", "--mmodules")
+	# --mclasses
+	var opt_mclasses = new OptionBool("Compute metrics about mclasses", "--mclasses")
+	# --mendel
+	var opt_mendel = new OptionBool("Compute mendel metrics", "--mendel")
 	# --inheritance
 	var opt_inheritance = new OptionBool("Compute metrics about inheritance usage", "--inheritance")
 	# --genericity
@@ -46,13 +52,17 @@ redef class ToolContext
 	# --rta
 	var opt_rta = new OptionBool("Compute RTA metrics", "--rta")
 	# --generate-csv
-	var opt_generate_csv = new OptionBool("Generate CVS format metrics", "--generate-csv")
+	var opt_csv = new OptionBool("Export metrics in CSV format", "--csv")
 	# --generate_hyperdoc
 	var opt_generate_hyperdoc = new OptionBool("Generate Hyperdoc", "--generate_hyperdoc")
 	# --poset
 	var opt_poset = new OptionBool("Complete metrics on posets", "--poset")
-
+	# --no-colors
+	var opt_nocolors = new OptionBool("Disable colors in console outputs", "--no-colors")
+	# --dir
 	var opt_dir = new OptionString("Directory where some statistics files are generated", "-d", "--dir")
+
+	# Output directory for metrics files.
 	var output_dir: String = "."
 
 	redef init
@@ -60,6 +70,8 @@ redef class ToolContext
 		super
 		self.option_context.add_option(opt_all)
 		self.option_context.add_option(opt_mmodules)
+		self.option_context.add_option(opt_mclasses)
+		self.option_context.add_option(opt_mendel)
 		self.option_context.add_option(opt_inheritance)
 		self.option_context.add_option(opt_refinement)
 		self.option_context.add_option(opt_self)
@@ -68,13 +80,14 @@ redef class ToolContext
 		self.option_context.add_option(opt_static_types)
 		self.option_context.add_option(opt_tables)
 		self.option_context.add_option(opt_rta)
-		self.option_context.add_option(opt_generate_csv)
+		self.option_context.add_option(opt_csv)
 		self.option_context.add_option(opt_generate_hyperdoc)
 		self.option_context.add_option(opt_poset)
 		self.option_context.add_option(opt_dir)
+		self.option_context.add_option(opt_nocolors)
 	end
 
-	redef fun process_options
+	redef fun process_options(args)
 	do
 		super
 		var val = self.opt_dir.value
@@ -84,49 +97,60 @@ redef class ToolContext
 			self.output_dir = val
 		end
 	end
-end
 
-redef class Model
-
-	# List of modules in std lib
-	# FIXME this is quite ugly, find a dynamic way...
-	fun std_modules: Set[String] do
-		if self.std_modules_cache == null then
-			self.std_modules_cache = new HashSet[String]
-			self.std_modules_cache.add("collection")
-			self.std_modules_cache.add("abstract_collection")
-			self.std_modules_cache.add("array")
-			self.std_modules_cache.add("hash_collection")
-			self.std_modules_cache.add("list")
-			self.std_modules_cache.add("range")
-			self.std_modules_cache.add("sorter")
-			self.std_modules_cache.add("environ")
-			self.std_modules_cache.add("exec")
-			self.std_modules_cache.add("file")
-			self.std_modules_cache.add("gc")
-			self.std_modules_cache.add("hash")
-			self.std_modules_cache.add("kernel")
-			self.std_modules_cache.add("math")
-			self.std_modules_cache.add("standard")
-			self.std_modules_cache.add("stream")
-			self.std_modules_cache.add("string")
-			self.std_modules_cache.add("string_search")
-			self.std_modules_cache.add("time")
-		end
-		return self.std_modules_cache.as(not null)
+	# Format and colorize a string heading of level 1 for console output.
+	#
+	# Default style is yellow and bold.
+	fun format_h1(str: String): String do
+		if opt_nocolors.value then return str
+		return str.yellow.bold
+	end
+
+	# Format and colorize a string heading of level 2 for console output.
+	#
+	# Default style is white and bold.
+	fun format_h2(str: String): String do
+		if opt_nocolors.value then return str
+		return str.bold
 	end
 
-	private var std_modules_cache: nullable Set[String]
+
+	# Format and colorize a string heading of level 3 for console output.
+	#
+	# Default style is white and not bold.
+	fun format_h3(str: String): String do
+		if opt_nocolors.value then return str
+		return str
+	end
+
+	# Format and colorize a string heading of level 4 for console output.
+	#
+	# Default style is green.
+	fun format_h4(str: String): String do
+		if opt_nocolors.value then return str
+		return str.green
+	end
+
+	# Format and colorize a string paragraph for console output.
+	#
+	# Default style is light gray.
+	fun format_p(str: String): String do
+		if opt_nocolors.value then return str
+		return str.light_gray
+	end
+
 end
 
 redef class MClass
-	fun is_user_defined: Bool do
-		return self.intro_mmodule.is_user_defined
+	# Is the class imported from the standard lib?
+	fun is_standard: Bool do
+		return self.intro_mmodule.mgroup.mproject.name == "standard"
 	end
 end
 
 redef class MModule
-	fun is_user_defined: Bool do
-		return not self.model.std_modules.has(self.name)
+	# Is the module imported from the standard lib?
+	fun is_standard: Bool do
+		return self.mgroup.mproject.name == "standard"
 	end
 end
 
@@ -134,88 +158,221 @@ end
 #
 # The concept is reified here for a better organization and documentation
 interface Metric
+
+	# Type of elements measured by this metric.
+	type ELM: Object
+
+	# Type of values used to measure elements.
+	type VAL: Object
+
+	# Type of data representation used to associate elements and values.
+	type RES: Map[ELM, VAL]
+
+	# The name of this metric (generally an acronym about the metric).
 	fun name: String is abstract
+
+	# A long and understandable description about what is measured by this metric.
 	fun desc: String is abstract
-	# clear all results for this metric
+
+	# Clear all results for this metric
 	fun clear is abstract
+
+	# Values for each element
+	fun values: RES is abstract
+
+	# Collect metric values on elements
+	fun collect(elements: Set[ELM]) is abstract
+
+	# The value calculated for the element
+	fun [](element: ELM): VAL do return values[element]
+
+	# Does the element have a value for this metric?
+	fun has_element(element: ELM): Bool do return values.has_key(element)
+
+	# The values average
+	fun avg: Float is abstract
+
+	# Pretty print the metric results in console
+	fun to_console(indent: Int, colors: Bool) do
+		if values.is_empty then
+			if colors then
+				print "{"\t" * indent}{name}: {desc} -- nothing".green
+			else
+				print "{"\t" * indent}{name}: {desc} -- nothing"
+			end
+			return
+		end
+
+		var max = self.max
+		var min = self.min
+		if colors then
+			print "{"\t" * indent}{name}: {desc}".green
+			print "{"\t" * indent} avg: {avg}".light_gray
+			print "{"\t" * indent} max: {max} ({self[max]})".light_gray
+			print "{"\t" * indent} min: {min} ({self[min]})".light_gray
+			print "{"\t" * indent} std: {std_dev}".light_gray
+		else
+			print "{"\t" * indent}{name}: {desc}"
+			print "{"\t" * indent} avg: {avg}"
+			print "{"\t" * indent} max: {max} ({self[max]})"
+			print "{"\t" * indent} min: {min} ({self[min]})"
+			print "{"\t" * indent} std: {std_dev}"
+		end
+	end
+
+	# The sum of all the values.
+	fun sum: VAL is abstract
+
+	# The values standard deviation
+	fun std_dev: Float is abstract
+
+	# The element with the highest value
+	fun max: ELM is abstract
+
+	# The element with the lowest value
+	fun min: ELM is abstract
+
+	# The value threshold above which elements are considered 'interesting'
+	fun threshold: Float do return avg + std_dev
+
+	# The set of elements above the threshold
+	fun above_threshold: Set[ELM] is abstract
+
+	# Sort the metric keys by values
+	fun sort: Array[ELM] do
+		return values.keys_sorted_by_values(default_reverse_comparator)
+	end
 end
 
 # A Metric that collects integer data
 #
 # Used to count things
-class IntMetric[E: Object]
+class IntMetric
 	super Metric
 
-	var values = new Counter[E]
+	redef type VAL: Int
+	redef type RES: Counter[ELM]
 
-	redef fun clear do values.clear
+	# `IntMetric` uses a Counter to store values internally.
+	protected var values_cache = new Counter[ELM]
 
-	# Return the couple with the highest value
-	fun max: Couple[E, Int] do
-		assert not values.is_empty
-		var elem = values.max.as(not null)
-		var value = values[elem]
-		return new Couple[E, Int](elem, value)
+	redef fun values do return values_cache
+
+	redef fun clear do values_cache.clear
+
+	redef fun sum do return values_cache.sum
+
+	redef fun max do
+		assert not values_cache.is_empty
+		return values_cache.max.as(not null)
 	end
 
-	# Return the couple with the lowest value
-	fun min: Couple[E, Int] do
-		assert not values.is_empty
-		var elem = values.min.as(not null)
-		var value = values[elem]
-		return new Couple[E, Int](elem, value)
+	redef fun min do
+		assert not values_cache.is_empty
+		return values_cache.min.as(not null)
 	end
 
 	# Values average
-	fun avg: Float do return values.avg
+	redef fun avg do return values_cache.avg
+
+	redef fun std_dev do return values_cache.std_dev
+
+	redef fun above_threshold do
+		var above = new HashSet[ELM]
+		var threshold = threshold
+		for element, value in values do
+			if value.to_f > threshold then above.add(element)
+		end
+		return above
+	end
+
+	redef fun to_console(indent, colors) do
+		super
+		if colors then
+			print "{"\t" * indent} sum: {sum}".light_gray
+		else
+			print "{"\t" * indent} sum: {sum}"
+		end
+	end
 end
 
 # A Metric that collects float datas
 #
 # Used sor summarization
-class FloatMetric[E: Object]
+class FloatMetric
 	super Metric
 
-	var values: Map[E, Float] = new HashMap[E, Float]
+	redef type VAL: Float
+
+	# `FloatMetric` uses a Map to store values internally.
+	protected var values_cache = new HashMap[ELM, VAL]
+
+	redef fun values do return values_cache
 
-	redef fun clear do values.clear
+	redef fun clear do values_cache.clear
 
-	# Return the couple with the highest value
-	fun max: Couple[E, Float] do
+
+	redef fun sum do
+		var sum = 0.0
+		for v in values.values do sum += v
+		return sum
+	end
+
+	redef fun max do
 		assert not values.is_empty
 		var max: nullable Float = null
-		var elem: nullable E = null
+		var elem: nullable ELM = null
 		for e, v in values do
 			if max == null or v > max then
 				max = v
 				elem = e
 			end
 		end
-		return new Couple[E, Float](elem.as(not null), max.as(not null))
+		return elem.as(not null)
 	end
 
-	# Return the couple with the lowest value
-	fun min: Couple[E, Float] do
+	redef fun min do
		assert not values.is_empty
 		var min: nullable Float = null
-		var elem: nullable E = null
+		var elem: nullable ELM = null
 		for e, v in values do
 			if min == null or v < min then
 				min = v
 				elem = e
 			end
 		end
-		return new Couple[E, Float](elem.as(not null), min.as(not null))
+		return elem.as(not null)
 	end
 
-	# Values average
-	fun avg: Float do
+	redef fun avg do
 		if values.is_empty then return 0.0
+		return sum / values.length.to_f
+	end
+
+	redef fun std_dev do
 		var sum = 0.0
 		for value in values.values do
-			sum += value
+			sum += (value - avg).pow(2.to_f)
+		end
+		return (sum / values.length.to_f).sqrt
+	end
+
+	redef fun above_threshold do
+		var above = new HashSet[ELM]
+		var threshold = threshold
+		for element, value in values do
+			if value > threshold then above.add(element)
+		end
+		return above
+	end
+
+	redef fun to_console(indent, colors) do
+		super
+		if colors then
+			print "{"\t" * indent} sum: {sum}".light_gray
+		else
+			print "{"\t" * indent} sum: {sum}"
 		end
-		return sum / values.length.to_f
 	end
 end
 
@@ -223,14 +380,57 @@ end
 #
 # It purpose is to be extended with a metric collect service
 class MetricSet
-	type METRIC: Metric
+
+	# Type of elements measured by this `MetricSet`.
+	type ELM: Object
 
 	# Metrics to compute
-	var metrics: Map[String, METRIC] = new HashMap[String, METRIC]
+	var metrics: Set[Metric] = new HashSet[Metric]
 
 	# Add a metric to the set
-	fun register(metrics: METRIC...) do for metric in metrics do self.metrics[metric.name] = metric
+	fun register(metrics: Metric...) do for metric in metrics do self.metrics.add(metric)
 
 	# Clear all results for all metrics
-	fun clear do for metric in metrics.values do metric.clear
+	fun clear do for metric in metrics do metric.clear
+
+	# Collect all metrics for this set of elements
+	fun collect(elements: Set[ELM]) do
+		for metric in metrics do metric.collect(elements)
+	end
+
+	# Pretty print the results in console
+	fun to_console(indent: Int, colors: Bool) do
+		for metric in metrics do metric.to_console(indent, colors)
+	end
+
+	# Export the metric set in CSV format
+	fun to_csv: CsvDocument do
+		var csv = new CsvDocument
+
+		csv.format = new CsvFormat('"', ';', "\n")
+
+		# set csv headers
+		csv.header.add("entry")
+		for metric in metrics do csv.header.add(metric.name)
+
+		# collect all entries to merge metric results
+		var entries = new HashSet[ELM]
+		for metric in metrics do
+			for entry in metric.values.keys do entries.add(entry)
+		end
+
+		# collect results
+		for entry in entries do
+			var line = [entry.to_s]
+			for metric in metrics do
+				if metric.has_element(entry) then
+					line.add(metric[entry].to_s)
+				else
+					line.add("n/a")
+				end
+			end
+			csv.records.add(line)
+		end
+		return csv
+	end
 end
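
For readers skimming the patch, here is a minimal, hypothetical usage sketch of the reworked API it introduces (not part of the commit): a toy IntMetric subclass that measures the length of strings, grouped in a MetricSet for console and CSV reporting. The names StrLenMetric, strlen and strlen.csv are made up for illustration.

# Hypothetical example, not part of the patch.
module strlen_metric_example

import metrics_base

# A toy metric: the length of each measured string.
class StrLenMetric
	super IntMetric

	redef type ELM: String

	redef fun name do return "strlen"
	redef fun desc do return "length of each string"

	# Store one value per element in the inherited counter.
	redef fun collect(strings) do
		for s in strings do values_cache[s] = s.length
	end
end

var metric = new StrLenMetric
var strings = new HashSet[String]
strings.add("letter")
strings.add("word")
strings.add("sentence")
metric.collect(strings)

# Group the metric in a set to reuse the reporting services.
var metrics = new MetricSet
metrics.register(metric)
metrics.to_console(0, true)

# Export as CSV (assuming CsvDocument offers the usual writable services).
metrics.to_csv.write_to_file("strlen.csv")

With this input, to_console should print the metric's avg, max, min, std and sum, and to_csv should produce a two-column document (entry; strlen), one record per string.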