Mirror of https://gitea.invidious.io/iv-org/shard-spectator.git (synced 2024-08-15 00:53:35 +00:00)
Hack together result output
Commit 9d139dfeed (parent f5713efc62)
25 changed files with 168 additions and 138 deletions
@@ -67,7 +67,7 @@ module Spectator
     # Build the spec and run it.
     DSL::Builder.config = config
     spec = DSL::Builder.build
-    spec.run
+    spec.run(config.example_filter)
     true
   rescue ex
     # Catch all unhandled exceptions here.

@@ -10,9 +10,19 @@ module Spectator
       visitor.error
     end
 
+    # Calls the `error` method on *visitor*.
+    def accept(visitor)
+      visitor.error(yield self)
+    end
+
     # One-word description of the result.
     def to_s(io)
       io << "error"
     end
+
+    # TODO
+    def to_json(builder)
+      builder.string("ERROR")
+    end
   end
 end

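
The `accept(visitor)` methods added above (and mirrored in the other result classes below) use a visitor-with-block idiom: the result yields itself to the caller's block, then forwards the block's return value to the visitor method matching its own type. A minimal sketch of the idiom, assuming a hypothetical `NameVisitor` with one method per result kind (`pass`, `failure`, `error`, `pending`, matching the names in this diff):

    # Hypothetical visitor, not part of this commit.
    module NameVisitor
      extend self

      def pass(text)
        "pass: #{text}"
      end

      def failure(text)
        "fail: #{text}"
      end

      def error(text)
        "error: #{text}"
      end

      def pending(text)
        "pending: #{text}"
      end
    end

    # A formatter can then dispatch on the result type in one call:
    # line = result.accept(NameVisitor) { |r| r.example.name }
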
@@ -17,7 +17,10 @@ module Spectator
     getter? finished : Bool = false
 
     # Retrieves the result of the last time the example ran.
-    getter result : Result = PendingResult.new
+    def result : Result
+      # TODO: Set to pending immediately (requires circular dependency between Example <-> Result removed).
+      @result ||= PendingResult.new(self)
+    end
 
     # Creates the example.
     # An instance to run the test code in is given by *context*.

@@ -59,7 +62,7 @@ module Spectator
 
       if pending?
         Log.debug { "Skipping example #{self} - marked pending" }
-        return @result = PendingResult.new
+        return @result = PendingResult.new(self)
       end
 
       previous_example = @@current

@@ -146,6 +149,11 @@ module Spectator
       io << result
     end
 
+    # TODO
+    def to_json(builder)
+      builder.string("EXAMPLE")
+    end
+
     # Wraps an example to behave like a `Proc`.
     # This is typically used for an *around_each* hook.
     # Invoking `#call` or `#run` will run the example.

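
The `result` getter in the first hunk above is now lazily memoized: until the example runs and assigns `@result`, reading it materializes and caches a `PendingResult` that points back at the example. The `||=` idiom in isolation (a toy class, not Spectator code):

    class Lazy
      @value : String?

      # `||=` assigns only when @value is nil, then returns it.
      def value : String
        @value ||= "computed once"
      end
    end

    lazy = Lazy.new
    puts lazy.value # computes and caches
    puts lazy.value # returns the cached value
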
@@ -11,8 +11,8 @@ module Spectator
     # Creates a failure result.
     # The *elapsed* argument is the length of time it took to run the example.
     # The *error* is the exception raised that caused the failure.
-    def initialize(elapsed, @error)
-      super(elapsed)
+    def initialize(example, elapsed, @error, expectations = [] of Expectation)
+      super(example, elapsed, expectations)
     end
 
     # Calls the `failure` method on *visitor*.

@@ -20,9 +20,19 @@ module Spectator
       visitor.failure
     end
 
+    # Calls the `failure` method on *visitor*.
+    def accept(visitor)
+      visitor.failure(yield self)
+    end
+
     # One-word description of the result.
     def to_s(io)
       io << "fail"
     end
+
+    # TODO
+    def to_json(builder)
+      builder.string("FAIL")
+    end
   end
 end

@@ -15,7 +15,7 @@ module Spectator::Formatting
     }
 
     # Colorizes some text with the success color.
-    def success(text)
+    def pass(text)
       text.colorize(COLORS[:success])
     end
 

@@ -29,7 +29,7 @@ module Spectator::Formatting
     # Produces a single character output based on a result.
     def end_example(result)
       @previous_hierarchy.size.times { @io.print INDENT }
-      @io.puts result.call(Color) { result.example.description }
+      @io.puts result.accept(Color) { result.example.name }
     end
 
     # Produces a list of groups making up the hierarchy for an example.

@@ -38,7 +38,7 @@ module Spectator::Formatting
       group = example.group
       while group.is_a?(ExampleGroup)
         hierarchy << group
-        group = group.parent
+        group = group.group?
       end
       hierarchy.reverse
     end

@@ -57,7 +57,7 @@ module Spectator::Formatting
     private def print_sub_hierarchy(index, sub_hierarchy)
       sub_hierarchy.each do |group|
         index.times { @io.print INDENT }
-        @io.puts group.description
+        @io.puts group.name
         index += 1
       end
     end

@@ -21,7 +21,7 @@ module Spectator::Formatting
 
     # Produces a single character output based on a result.
     def end_example(result)
-      @io.print result.call(Character)
+      @io.print result.accept(Character)
     end
 
     # Interface for `Result` to pick a character for output.

@@ -37,8 +37,8 @@ module Spectator::Formatting
       }
 
       # Character output for a successful example.
-      def success
-        Color.success(CHARACTERS[:success])
+      def pass
+        Color.pass(CHARACTERS[:success])
       end
 
       # Character output for a failed example.

@@ -7,7 +7,7 @@ module Spectator::Formatting
     private getter result
 
     # Creates the JUnit test case.
-    def initialize(@result : ErroredResult)
+    def initialize(@result : ErrorResult)
     end
 
     # Adds the exception to the XML block.

@@ -16,7 +16,7 @@ module Spectator::Formatting
     # Creates the failure block.
     # The *index* uniquely identifies the failure in the output.
     # The *result* is the outcome of the failed example.
-    def initialize(@index : Int32, @result : FailedResult)
+    def initialize(@index : Int32, @result : FailResult)
     end
 
     # Creates the block of text describing the failure.

@@ -47,12 +47,12 @@ module Spectator::Formatting
     # then an error stacktrace if an error occurred.
     private def content(indent)
       unsatisfied_expectations(indent)
-      error_stacktrace(indent) if @result.is_a?(ErroredResult)
+      error_stacktrace(indent) if @result.is_a?(ErrorResult)
     end
 
     # Produces a list of unsatisfied expectations and their values.
     private def unsatisfied_expectations(indent)
-      @result.expectations.each_unsatisfied do |expectation|
+      @result.expectations.reject(&.satisfied?).each do |expectation|
         indent.line(Color.failure(LabeledText.new("Failure", expectation.failure_message)))
         indent.line
         indent.increase do

@@ -66,7 +66,7 @@ module Spectator::Formatting
     private def matcher_values(indent, expectation)
       MatchDataValues.new(expectation.values).each do |pair|
         colored_pair = if expectation.satisfied?
-                         Color.success(pair)
+                         Color.pass(pair)
                        else
                          Color.failure(pair)
                        end

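
With expectations now exposed as a plain `Enumerable(Expectation)` on the result, the `each_unsatisfied` helper is replaced by generic filtering; the two forms are equivalent. A self-contained illustration using a stand-in record (`Exp` is hypothetical, only there to show the filtering change):

    # Stand-in for Expectation.
    record Exp, label : String, satisfied : Bool do
      def satisfied?
        @satisfied
      end
    end

    exps = [Exp.new("a", true), Exp.new("b", false), Exp.new("c", false)]

    # Old style: exps.each_unsatisfied { |e| ... } (helper on a wrapper type)
    # New style: filter the generic collection directly.
    exps.reject(&.satisfied?).each do |e|
      puts "unsatisfied: #{e.label}"
    end
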
@@ -13,7 +13,7 @@ module Spectator::Formatting
 
     # Colorizes the command instance based on the result.
     def self.color(result)
-      result.call(Color) { new(result.example) }
+      result.accept(Color) { new(result.example) }
     end
   end
 end

@@ -7,7 +7,7 @@ module Spectator::Formatting
     private getter result
 
     # Creates the JUnit test case.
-    def initialize(@result : FailedResult)
+    def initialize(@result : FailResult)
     end
 
     # Status string specific to the result type.

@@ -18,7 +18,7 @@ module Spectator::Formatting
     # Adds the failed expectations to the XML block.
     private def content(xml)
       super
-      @result.expectations.each_unsatisfied do |expectation|
+      @result.expectations.reject(&.satisfied?).each do |expectation|
         xml.element("failure", message: expectation.failure_message) do
           expectation_values(expectation.values, xml)
         end

@@ -25,7 +25,7 @@ module Spectator::Formatting
     # Adds the test case elements to the XML.
     private def add_test_cases(xml)
       @report.each do |result|
-        test_case = result.call(JUnitTestCaseSelector) { |r| r }
+        test_case = result.accept(JUnitTestCaseSelector) { |r| r }
         test_case.to_xml(xml)
       end
     end

@@ -50,18 +50,18 @@ module Spectator::Formatting
       extend self
 
       # Creates a successful JUnit test case.
-      def success(result)
-        SuccessfulJUnitTestCase.new(result.as(SuccessfulResult))
+      def pass(result)
+        SuccessfulJUnitTestCase.new(result.as(PassResult))
       end
 
       # Creates a failure JUnit test case.
       def failure(result)
-        FailureJUnitTestCase.new(result.as(FailedResult))
+        FailureJUnitTestCase.new(result.as(FailResult))
       end
 
       # Creates an error JUnit test case.
       def error(result)
-        ErrorJUnitTestCase.new(result.as(ErroredResult))
+        ErrorJUnitTestCase.new(result.as(ErrorResult))
       end
 
       # Creates a skipped JUnit test case.

@@ -20,7 +20,7 @@ module Spectator::Formatting
       elsif @pending > 0
         Color.pending(self)
       else
-        Color.success(self)
+        Color.pass(self)
       end
     end
 

@@ -5,7 +5,7 @@ module Spectator::Formatting
     private getter result
 
     # Creates the JUnit test case.
-    def initialize(@result : SuccessfulResult)
+    def initialize(@result : PassResult)
     end
 
     # Status string specific to the result type.

@@ -17,7 +17,7 @@ module Spectator::Formatting
 
     # The text "ok" or "not ok" depending on the result.
     private def status
-      @result.is_a?(FailedResult) ? "not ok" : "ok"
+      @result.is_a?(FailResult) ? "not ok" : "ok"
     end
 
     # The example that was tested.

@@ -91,13 +91,14 @@ module Spectator
     # Takes the *elapsed* time and a possible *error* from the test.
     # Returns a type of `Result`.
     private def translate(elapsed, error) : Result
+      example = Example.current # TODO: Remove this.
       case error
       when nil
-        PassResult.new(elapsed)
+        PassResult.new(example, elapsed)
       when ExpectationFailed
-        FailResult.new(elapsed, error)
+        FailResult.new(example, elapsed, error)
       else
-        ErrorResult.new(elapsed, error)
+        ErrorResult.new(example, elapsed, error)
       end
     end
 

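
The mapping in `translate` is the heart of the result types: no error means pass, an `ExpectationFailed` means fail, and any other exception means error. A condensed, runnable analog of that dispatch (`classify` and the stand-alone `ExpectationFailed` class here are illustrative, not Spectator's API):

    class ExpectationFailed < Exception
    end

    # Same three-way dispatch as Harness#translate, reduced to strings.
    def classify(error : Exception?) : String
      case error
      when nil               then "pass"
      when ExpectationFailed then "fail"
      else                        "error"
      end
    end

    puts classify(nil)                   # => pass
    puts classify(ExpectationFailed.new) # => fail
    puts classify(Exception.new("boom")) # => error
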
@@ -47,7 +47,6 @@ require "./pending_result"
 require "./profile"
 require "./report"
 require "./result"
-require "./runner"
 require "./source"
 require "./source_example_filter"
 require "./spec"

@@ -8,9 +8,19 @@ module Spectator
       visitor.pass
     end
 
+    # Calls the `pass` method on *visitor*.
+    def accept(visitor)
+      visitor.pass(yield self)
+    end
+
     # One-word description of the result.
     def to_s(io)
       io << "pass"
     end
+
+    # TODO
+    def to_json(builder)
+      builder.string("PASS")
+    end
   end
 end

@@ -7,7 +7,7 @@ module Spectator
   class PendingResult < Result
     # Creates the result.
     # *elapsed* is the length of time it took to run the example.
-    def initialize(elapsed = Time::Span::ZERO)
+    def initialize(example, elapsed = Time::Span::ZERO, expectations = [] of Expectation)
       super
     end
 

@@ -16,9 +16,19 @@ module Spectator
       visitor.pending
     end
 
+    # Calls the `pending` method on the *visitor*.
+    def accept(visitor)
+      visitor.pending(yield self)
+    end
+
     # One-word description of the result.
     def to_s(io)
       io << "pending"
     end
+
+    # TODO
+    def to_json(builder)
+      builder.string("PENDING")
+    end
   end
 end

@@ -8,7 +8,7 @@ module Spectator
 
     # Creates the profiling information.
     # The *slowest* results must already be sorted, longest time first.
-    private def initialize(@slowest : Array(FinishedResult), @total_time)
+    private def initialize(@slowest : Array(Result), @total_time)
     end
 
     # Number of results in the profile.

@@ -33,7 +33,7 @@ module Spectator
 
     # Produces the profile from a report.
     def self.generate(report, size = 10)
-      results = report.compact_map(&.as?(FinishedResult))
+      results = report.to_a
       sorted_results = results.sort_by(&.elapsed)
       slowest = sorted_results.last(size).reverse
       self.new(slowest, report.example_runtime)

@@ -37,12 +37,12 @@ module Spectator
     def initialize(@results : Array(Result), @runtime, @remaining_count = 0, @fail_blank = false, @random_seed = nil)
       @results.each do |result|
         case result
-        when SuccessfulResult
+        when PassResult
           @successful_count += 1
-        when ErroredResult
+        when ErrorResult
           @error_count += 1
           @failed_count += 1
-        when FailedResult
+        when FailResult
           @failed_count += 1
         when PendingResult
           @pending_count += 1

@@ -58,7 +58,7 @@ module Spectator
     # The *results* are from running the examples in the test suite.
     # The runtime is calculated from the *results*.
     def initialize(results : Array(Result))
-      runtime = results.each.compact_map(&.as?(FinishedResult)).sum(&.elapsed)
+      runtime = results.sum(&.elapsed)
       initialize(results, runtime)
     end
 

@@ -92,19 +92,19 @@ module Spectator
 
     # Returns a set of results for all failed examples.
     def failures
-      @results.each.compact_map(&.as?(FailedResult))
+      @results.each.compact_map(&.as?(FailResult))
     end
 
     # Returns a set of results for all errored examples.
     def errors
-      @results.each.compact_map(&.as?(ErroredResult))
+      @results.each.compact_map(&.as?(ErrorResult))
     end
 
     # Length of time it took to run just example code.
     # This does not include hooks,
     # but it does include pre- and post-conditions.
     def example_runtime
-      @results.each.compact_map(&.as?(FinishedResult)).sum(&.elapsed)
+      @results.sum(&.elapsed)
     end
 
     # Length of time spent in framework processes and hooks.

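
Note the counting rule in the first hunk above: an `ErrorResult` increments both the error and failure counters, so every error is also reported as a failure. A toy tally with stand-in classes (illustrative only; these names are not Spectator's):

    abstract class R; end

    class PassR < R; end
    class FailR < R; end
    class ErrorR < R; end
    class PendingR < R; end

    results = [PassR.new, ErrorR.new, FailR.new, PendingR.new] of R

    passed = failed = errored = pending = 0
    results.each do |result|
      case result
      when PassR
        passed += 1
      when ErrorR
        errored += 1
        failed += 1 # an error also counts as a failure
      when FailR
        failed += 1
      when PendingR
        pending += 1
      end
    end

    puts({passed: passed, failed: failed, errored: errored, pending: pending})
    # => {passed: 1, failed: 2, errored: 1, pending: 1}
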
@@ -2,15 +2,19 @@ module Spectator
   # Base class that represents the outcome of running an example.
   # Sub-classes contain additional information specific to the type of result.
   abstract class Result
+    # Example that generated the result.
+    # TODO: Remove this.
+    getter example : Example
+
     # Length of time it took to run the example.
     getter elapsed : Time::Span
 
     # The assertions checked in the example.
     # getter assertions : Enumerable(Assertion) # TODO: Implement Assertion type.
     getter expectations : Enumerable(Expectation)
 
     # Creates the result.
     # *elapsed* is the length of time it took to run the example.
-    def initialize(@elapsed)
+    def initialize(@example, @elapsed, @expectations = [] of Expectation)
     end
 
     # Calls the corresponding method for the type of result.

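
After this hunk, every result carries its originating example and the expectations that were checked, so formatters can pull everything they need from the result alone. A toy analog of the new shape (the types here are stand-ins, not Spectator's):

    record ToyExample, name : String

    abstract class ToyResult
      getter example : ToyExample
      getter elapsed : Time::Span

      def initialize(@example, @elapsed)
      end
    end

    class ToyPass < ToyResult
    end

    result = ToyPass.new(ToyExample.new("adds two numbers"), Time::Span.zero)
    puts "#{result.example.name} (#{result.elapsed.total_milliseconds} ms)"
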
@@ -1,81 +0,0 @@
-require "./harness"
-
-module Spectator
-  # Main driver for executing tests and feeding results to formatters.
-  class Runner
-    # Creates the test suite runner.
-    # Specify the test *suite* to run and any additonal configuration.
-    def initialize(@suite : TestSuite, @config : Config)
-    end
-
-    # Runs the test suite.
-    # This will run the selected examples
-    # and invoke the formatter to output results.
-    # True will be returned if the test suite ran successfully,
-    # or false if there was at least one failure.
-    def run : Bool
-      # Indicate the suite is starting.
-      @config.each_formatter(&.start_suite(@suite))
-
-      # Run all examples and capture the results.
-      results = Array(Result).new(@suite.size)
-      elapsed = Time.measure do
-        collect_results(results)
-      end
-
-      # Generate a report and pass it along to the formatter.
-      remaining = @suite.size - results.size
-      seed = (@config.random_seed? if @config.randomize?)
-      report = Report.new(results, elapsed, remaining, @config.fail_blank?, seed)
-      @config.each_formatter(&.end_suite(report, profile(report)))
-
-      !report.failed?
-    end
-
-    # Runs all examples and adds results to a list.
-    private def collect_results(results)
-      example_order.each do |example|
-        result = run_example(example).as(Result)
-        results << result
-        if @config.fail_fast? && result.is_a?(FailedResult)
-          example.group.context.run_after_all_hooks(example.group, ignore_unfinished: true)
-          break
-        end
-      end
-    end
-
-    # Retrieves an enumerable for the examples to run.
-    # The order of examples is randomized
-    # if specified by the configuration.
-    private def example_order
-      @suite.to_a.tap do |examples|
-        examples.shuffle!(@config.random) if @config.randomize?
-      end
-    end
-
-    # Runs a single example and returns the result.
-    # The formatter is given the example and result information.
-    private def run_example(example)
-      @config.each_formatter(&.start_example(example))
-      result = if @config.dry_run? && example.is_a?(RunnableExample)
-                 dry_run_result(example)
-               else
-                 Harness.run(example)
-               end
-      @config.each_formatter(&.end_example(result))
-      result
-    end
-
-    # Creates a fake result for an example.
-    private def dry_run_result(example)
-      expectations = [] of Expectations::Expectation
-      example_expectations = Expectations::ExampleExpectations.new(expectations)
-      SuccessfulResult.new(example, Time::Span.zero, example_expectations)
-    end
-
-    # Generates and returns a profile if one should be displayed.
-    private def profile(report)
-      Profile.generate(report) if @config.profile?
-    end
-  end
-end

@@ -1,7 +1,7 @@
 require "./config"
 require "./example"
 require "./example_group"
-require "./example_iterator"
+require "./test_suite"
 
 module Spectator
   # Contains examples to be tested.

@@ -9,15 +9,9 @@ module Spectator
     def initialize(@root : ExampleGroup, @config : Config)
     end
 
-    def run
-      Runner.new(examples).run
-    end
-
-    # Generates a list of examples to run.
-    # The order of the examples are also sorted based on the configuration.
-    private def examples
-      examples = ExampleIterator.new(@root).to_a
-      @config.shuffle!(examples)
+    def run(filter : ExampleFilter)
+      suite = TestSuite.new(@root, filter)
+      Runner.new(suite, @config).run
     end
   end
 end

@@ -3,11 +3,76 @@ require "../example"
 module Spectator
   class Spec
     private struct Runner
-      def initialize(@examples : Array(Example))
+      def initialize(@suite : TestSuite, @config : Config)
       end
 
-      def run
-        @examples.each(&.run)
+      # Runs the test suite.
+      # This will run the selected examples
+      # and invoke the formatter to output results.
+      # True will be returned if the test suite ran successfully,
+      # or false if there was at least one failure.
+      def run : Bool
+        # Indicate the suite is starting.
+        @config.each_formatter(&.start_suite(@suite))
+
+        # Run all examples and capture the results.
+        results = Array(Result).new(@suite.size)
+        elapsed = Time.measure do
+          collect_results(results)
+        end
+
+        # Generate a report and pass it along to the formatter.
+        remaining = @suite.size - results.size
+        seed = (@config.random_seed if @config.randomize?)
+        report = Report.new(results, elapsed, remaining, @config.fail_blank?, seed)
+        @config.each_formatter(&.end_suite(report, profile(report)))
+
+        !report.failed?
       end
+
+      # Runs all examples and adds results to a list.
+      private def collect_results(results)
+        example_order.each do |example|
+          result = run_example(example).as(Result)
+          results << result
+          if @config.fail_fast? && result.is_a?(FailResult)
+            example.group.call_once_after_all
+            break
+          end
+        end
+      end
+
+      # Retrieves an enumerable for the examples to run.
+      # The order of examples is randomized
+      # if specified by the configuration.
+      private def example_order
+        @suite.to_a.tap do |examples|
+          @config.shuffle!(examples)
+        end
+      end
+
+      # Runs a single example and returns the result.
+      # The formatter is given the example and result information.
+      private def run_example(example)
+        @config.each_formatter(&.start_example(example))
+        result = if @config.dry_run?
+                   dry_run_result(example)
+                 else
+                   example.run
+                 end
+        @config.each_formatter(&.end_example(result))
+        result
+      end
+
+      # Creates a fake result for an example.
+      private def dry_run_result(example)
+        expectations = [] of Expectation
+        PassResult.new(example, Time::Span.zero, expectations)
+      end
+
+      # Generates and returns a profile if one should be displayed.
+      private def profile(report)
+        Profile.generate(report) if @config.profile?
+      end
     end
   end
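
Taken together with the spectator.cr hunk at the top, the run flow after this commit is roughly as follows (a comment trace, assuming only the names shown in this diff):

    # DSL::Builder.config = config
    # spec = DSL::Builder.build          # assemble the Spec from the DSL
    # spec.run(config.example_filter)    # Spec#run(filter):
    #   suite = TestSuite.new(@root, filter)
    #   Runner.new(suite, @config).run   # the private Spec::Runner above
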