// nextest_runner/reporter/events.rs

1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Events for the reporter.
5//!
6//! These types form the interface between the test runner and the test
7//! reporter. The root structure for all events is [`TestEvent`].
8
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10use crate::{
11    config::{elements::LeakTimeoutResult, scripts::ScriptId},
12    list::{TestInstance, TestInstanceId, TestList},
13    runner::{StressCondition, StressCount},
14    test_output::ChildExecutionOutput,
15};
16use chrono::{DateTime, FixedOffset};
17use nextest_metadata::MismatchReason;
18use quick_junit::ReportUuid;
19use std::{collections::BTreeMap, fmt, num::NonZero, process::ExitStatus, time::Duration};
20
/// A reporter event.
///
/// Either a periodic [`Tick`](Self::Tick) or a [`TestEvent`] wrapped in
/// [`Test`](Self::Test).
#[derive(Clone, Debug)]
pub enum ReporterEvent<'a> {
    /// A periodic tick.
    Tick,

    /// A test event.
    ///
    /// Boxed so that `ReporterEvent` itself stays small: an enum is as large
    /// as its biggest variant, and `TestEvent` is comparatively large.
    Test(Box<TestEvent<'a>>),
}
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}
45
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
///
/// The `stress_index` carried by per-test and per-script variants is `Some`
/// only when a stress test is being run.
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,

        /// The current number of running tests.
        running: usize,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,

        /// The current number of running tests.
        running: usize,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run, or overall statistics for stress tests.
        run_stats: RunFinishedStats,
    },
}
383
/// Progress for a stress test.
///
/// Both variants track the cumulative `elapsed` time and the number of
/// sub-runs `completed`; they differ in how the total is expressed.
#[derive(Clone, Debug)]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs.
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}
411
412impl StressProgress {
413    /// Returns the remaining amount of work if the progress indicates there's
414    /// still more to do, otherwise `None`.
415    pub fn remaining(&self) -> Option<StressRemaining> {
416        match self {
417            Self::Count {
418                total: StressCount::Count(total),
419                elapsed: _,
420                completed,
421            } => total
422                .get()
423                .checked_sub(*completed)
424                .and_then(|remaining| NonZero::try_from(remaining).ok())
425                .map(StressRemaining::Count),
426            Self::Count {
427                total: StressCount::Infinite,
428                ..
429            } => Some(StressRemaining::Infinite),
430            Self::Time {
431                total,
432                elapsed,
433                completed: _,
434            } => total.checked_sub(*elapsed).map(StressRemaining::Time),
435        }
436    }
437
438    /// Returns a unique ID for this stress sub-run, consisting of the run ID and stress index.
439    pub fn unique_id(&self, run_id: ReportUuid) -> String {
440        let stress_current = match self {
441            Self::Count { completed, .. } | Self::Time { completed, .. } => *completed,
442        };
443        format!("{}:@stress-{}", run_id, stress_current)
444    }
445}
446
/// For a stress test, the amount of time or number of stress runs remaining.
///
/// Produced by [`StressProgress::remaining`].
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}
459
/// The index of the current stress run.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    ///
    /// `None` when the total is not known up front.
    pub total: Option<NonZero<u32>>,
}
469
/// Statistics for a completed test run or stress run.
///
/// Carried by [`TestEventKind::RunFinished`].
#[derive(Clone, Debug)]
pub enum RunFinishedStats {
    /// A single test run was completed.
    Single(RunStats),

    /// A stress run was completed.
    Stress(StressRunStats),
}
479
480impl RunFinishedStats {
481    /// For a single run, returns a summary of statistics as an enum. For a
482    /// stress run, returns a summary for the last sub-run.
483    pub fn final_stats(&self) -> FinalRunStats {
484        match self {
485            Self::Single(stats) => stats.summarize_final(),
486            Self::Stress(stats) => stats.last_final_stats,
487        }
488    }
489}
490
/// Statistics for a test run.
///
/// For stress tests, each sub-run gets its own `RunStats` (see
/// [`TestEventKind::StressSubRunFinished`]).
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed. Includes `leaky_failed`.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of tests that timed out.
    pub timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,

    /// If the run is cancelled, the reason the cancellation is happening.
    pub cancel_reason: Option<CancelReason>,
}
556
impl RunStats {
    /// Returns true if there are any failures recorded in the stats.
    pub fn has_failures(&self) -> bool {
        self.failed_setup_script_count() > 0 || self.failed_count() > 0
    }

    /// Returns count of setup scripts that did not pass.
    pub fn failed_setup_script_count(&self) -> usize {
        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
    }

    /// Returns count of tests that did not pass.
    ///
    /// Note that `failed` already includes `leaky_failed` (see
    /// `on_test_finished`), so leaky-failed tests are counted exactly once.
    pub fn failed_count(&self) -> usize {
        self.failed + self.exec_failed + self.timed_out
    }

    /// Summarizes the stats as an enum at the end of a test run.
    ///
    /// Branches are evaluated in priority order: setup-script failures, then
    /// unfinished setup scripts, then test failures, then unfinished tests,
    /// then the no-tests-run case, and finally success.
    pub fn summarize_final(&self) -> FinalRunStats {
        // Check for failures first. The order of setup scripts vs tests should
        // not be important, though we don't assert that here.
        if self.failed_setup_script_count() > 0 {
            // Is this related to a cancellation other than one directly caused
            // by the failure?
            //
            // NOTE(review): this comparison relies on `CancelReason`'s `Ord`
            // (declared elsewhere) placing `TestFailure` below externally
            // triggered cancel reasons, and on `None < Some(_)` for `Option`.
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind: RunStatsFailureKind::SetupScript,
                }
            } else {
                FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
            }
        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
            // Some setup scripts never finished: the run was interrupted
            // during setup script execution.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::SetupScript,
            }
        } else if self.failed_count() > 0 {
            let kind = RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            };

            // Is this related to a cancellation other than one directly caused
            // by the failure?
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind,
                }
            } else {
                FinalRunStats::Failed(kind)
            }
        } else if self.initial_run_count > self.finished_count {
            // No failures, but some tests never ran.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: self.initial_run_count,
                    not_run: self.initial_run_count.saturating_sub(self.finished_count),
                },
            }
        } else if self.finished_count == 0 {
            FinalRunStats::NoTestsRun
        } else {
            FinalRunStats::Success
        }
    }

    /// Records a finished setup script into these stats, bucketing it by its
    /// execution result. A leak counts as pass or fail depending on the
    /// configured [`LeakTimeoutResult`].
    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
        self.setup_scripts_finished_count += 1;

        match status.result {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.setup_scripts_passed += 1;
            }
            ExecutionResult::Fail { .. }
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.setup_scripts_failed += 1;
            }
            ExecutionResult::ExecFail => {
                self.setup_scripts_exec_failed += 1;
            }
            ExecutionResult::Timeout => {
                self.setup_scripts_timed_out += 1;
            }
        }
    }

    /// Records a finished test (including all of its retry attempts) into
    /// these stats, based on the result of the final attempt.
    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
        self.finished_count += 1;
        // run_statuses is guaranteed to have at least one element.
        // * If the last element is success, treat it as success (and possibly flaky).
        // * If the last element is a failure, use it to determine fail/exec fail.
        // Note that this is different from what Maven Surefire does (use the first failure):
        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
        //
        // This is not likely to matter much in practice since failures are likely to be of the
        // same type.
        let last_status = run_statuses.last_status();
        match last_status.result {
            ExecutionResult::Pass => {
                self.passed += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                // More than one attempt means earlier attempts failed: flaky.
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.leaky += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                // Counted under both `failed` and `leaky_failed`; aggregate
                // counts such as `failed_count` only use `failed`.
                self.failed += 1;
                self.leaky_failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Fail { .. } => {
                self.failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Timeout => self.timed_out += 1,
            ExecutionResult::ExecFail => self.exec_failed += 1,
        }
    }
}
702
/// A type summarizing the possible outcomes of a test run.
///
/// Produced by [`RunStats::summarize_final`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled {
        /// The reason for cancellation, if available.
        ///
        /// This should generally be available, but may be None if some tests
        /// that were selected to run were not executed.
        reason: Option<CancelReason>,

        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}
727
/// Statistics for a stress run.
#[derive(Clone, Debug)]
pub struct StressRunStats {
    /// The number of stress runs completed.
    pub completed: StressIndex,

    /// The number of stress runs (sub-runs) that succeeded.
    pub success_count: u32,

    /// The number of stress runs (sub-runs) that failed.
    pub failed_count: u32,

    /// The last stress run's `FinalRunStats`.
    pub last_final_stats: FinalRunStats,
}
743
744impl StressRunStats {
745    /// Summarizes the stats as an enum at the end of a test run.
746    pub fn summarize_final(&self) -> StressFinalRunStats {
747        if self.failed_count > 0 {
748            StressFinalRunStats::Failed
749        } else if matches!(self.last_final_stats, FinalRunStats::Cancelled { .. }) {
750            StressFinalRunStats::Cancelled
751        } else if matches!(self.last_final_stats, FinalRunStats::NoTestsRun) {
752            StressFinalRunStats::NoTestsRun
753        } else {
754            StressFinalRunStats::Success
755        }
756    }
757}
758
/// A summary of final statistics for a stress run.
///
/// Produced by [`StressRunStats::summarize_final`].
// Derives added for consistency with the other summary enums in this module
// (e.g. `FinalRunStats`); this type previously had no derives at all, which
// prevented callers from debugging, copying, or comparing it.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum StressFinalRunStats {
    /// The stress run was successful.
    Success,

    /// No tests were run.
    NoTestsRun,

    /// The stress run was cancelled.
    Cancelled,

    /// At least one stress run failed.
    Failed,
}
773
/// A type summarizing the step at which a test run failed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
790
/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// Statuses in execution order (the last one is the final attempt).
    /// This is guaranteed to be non-empty.
    statuses: Vec<ExecuteStatus>,
}
797
798#[expect(clippy::len_without_is_empty)] // RunStatuses is never empty
799impl ExecutionStatuses {
800    pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
801        Self { statuses }
802    }
803
804    /// Returns the last execution status.
805    ///
806    /// This status is typically used as the final result.
807    pub fn last_status(&self) -> &ExecuteStatus {
808        self.statuses
809            .last()
810            .expect("execution statuses is non-empty")
811    }
812
813    /// Iterates over all the statuses.
814    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
815        self.statuses.iter()
816    }
817
818    /// Returns the number of times the test was executed.
819    pub fn len(&self) -> usize {
820        self.statuses.len()
821    }
822
823    /// Returns a description of self.
824    pub fn describe(&self) -> ExecutionDescription<'_> {
825        let last_status = self.last_status();
826        if last_status.result.is_success() {
827            if self.statuses.len() > 1 {
828                ExecutionDescription::Flaky {
829                    last_status,
830                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
831                }
832            } else {
833                ExecutionDescription::Success {
834                    single_status: last_status,
835                }
836            }
837        } else {
838            let first_status = self
839                .statuses
840                .first()
841                .expect("execution statuses is non-empty");
842            let retries = &self.statuses[1..];
843            ExecutionDescription::Failure {
844                first_status,
845                last_status,
846                retries,
847            }
848        }
849    }
850}
851
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}
886
887impl<'a> ExecutionDescription<'a> {
888    /// Returns the status level for this `ExecutionDescription`.
889    pub fn status_level(&self) -> StatusLevel {
890        match self {
891            ExecutionDescription::Success { single_status } => match single_status.result {
892                ExecutionResult::Leak {
893                    result: LeakTimeoutResult::Pass,
894                } => StatusLevel::Leak,
895                ExecutionResult::Pass => StatusLevel::Pass,
896                other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
897            },
898            // A flaky test implies that we print out retry information for it.
899            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
900            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
901        }
902    }
903
904    /// Returns the final status level for this `ExecutionDescription`.
905    pub fn final_status_level(&self) -> FinalStatusLevel {
906        match self {
907            ExecutionDescription::Success { single_status, .. } => {
908                // Slow is higher priority than leaky, so return slow first here.
909                if single_status.is_slow {
910                    FinalStatusLevel::Slow
911                } else {
912                    match single_status.result {
913                        ExecutionResult::Pass => FinalStatusLevel::Pass,
914                        ExecutionResult::Leak {
915                            result: LeakTimeoutResult::Pass,
916                        } => FinalStatusLevel::Leak,
917                        other => {
918                            unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
919                        }
920                    }
921                }
922            }
923            // A flaky test implies that we print out retry information for it.
924            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
925            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
926        }
927    }
928
929    /// Returns the last run status.
930    pub fn last_status(&self) -> &'a ExecuteStatus {
931        match self {
932            ExecutionDescription::Success {
933                single_status: last_status,
934            }
935            | ExecutionDescription::Flaky { last_status, .. }
936            | ExecutionDescription::Failure { last_status, .. } => last_status,
937        }
938    }
939}
940
/// Information about a single execution of a test.
///
/// Referenced by the variants of [`ExecutionDescription`].
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data, including the current attempt number.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay before this attempt was started.
    ///
    /// The delay will be non-zero if this is a retry and delay was specified.
    pub delay_before_start: Duration,
}
959
/// Information about the execution of a setup script.
///
/// The test-side counterpart of this type is [`ExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}
984
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script.
    ///
    /// Stored as a `BTreeMap` so iteration over variables is in a
    /// deterministic (sorted) order.
    pub env_map: BTreeMap<String, String>,
}
993
/// Data related to retries for a test.
///
/// The derived ordering is lexicographic: first by `attempt`, then by
/// `total_attempts`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: u32,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: u32,
}
1003
1004impl RetryData {
1005    /// Returns true if there are no more attempts after this.
1006    pub fn is_last_attempt(&self) -> bool {
1007        self.attempt >= self.total_attempts
1008    }
1009}
1010
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherited standard IO was created, but it didn't shut
    /// down when the test exited.
    Leak {
        /// Whether this leak was treated as a pass or a failure.
        ///
        /// Note the difference between `Fail { leaked: true }` and `Leak {
        /// result: LeakTimeoutResult::Fail }`. In the former case, the test
        /// failed and also leaked handles. In the latter case, the test passed
        /// but leaked handles, and configuration indicated that this is a
        /// failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The failure status: a non-zero exit code, or an abort status (for
        /// example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherited standard IO was created, but it didn't shut down
        /// when the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout,
}
1043
1044impl ExecutionResult {
1045    /// Returns true if the test was successful.
1046    pub fn is_success(self) -> bool {
1047        match self {
1048            ExecutionResult::Pass
1049            | ExecutionResult::Leak {
1050                result: LeakTimeoutResult::Pass,
1051            } => true,
1052            ExecutionResult::Leak {
1053                result: LeakTimeoutResult::Fail,
1054            }
1055            | ExecutionResult::Fail { .. }
1056            | ExecutionResult::ExecFail
1057            | ExecutionResult::Timeout => false,
1058        }
1059    }
1060
1061    /// Returns true if this result represents a test that was terminated by nextest
1062    /// (as opposed to failing naturally).
1063    ///
1064    /// This is used to suppress output spam when immediate termination is active.
1065    ///
1066    /// TODO: This is a heuristic that checks if the test was terminated by SIGTERM (Unix) or
1067    /// job object (Windows). In an edge case, a test could send SIGTERM to itself, which would
1068    /// incorrectly be detected as a nextest-initiated termination. A more robust solution would
1069    /// track which tests were explicitly sent termination signals by nextest.
1070    pub fn is_termination_failure(&self) -> bool {
1071        match self {
1072            #[cfg(unix)]
1073            ExecutionResult::Fail {
1074                failure_status: FailureStatus::Abort(AbortStatus::UnixSignal(libc::SIGTERM)),
1075                ..
1076            } => true,
1077            #[cfg(windows)]
1078            ExecutionResult::Fail {
1079                failure_status: FailureStatus::Abort(AbortStatus::JobObject),
1080                ..
1081            } => true,
1082            _ => false,
1083        }
1084    }
1085
1086    /// Returns a static string representation of the result.
1087    pub fn as_static_str(&self) -> &'static str {
1088        match self {
1089            ExecutionResult::Pass => "pass",
1090            ExecutionResult::Leak { .. } => "leak",
1091            ExecutionResult::Fail { .. } => "fail",
1092            ExecutionResult::ExecFail => "exec-fail",
1093            ExecutionResult::Timeout => "timeout",
1094        }
1095    }
1096}
1097
/// Failure status: either an exit code or an abort status.
///
/// Produced from a process's `ExitStatus` via [`FailureStatus::extract`];
/// abort statuses take precedence over plain exit codes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted.
    Abort(AbortStatus),
}
1107
1108impl FailureStatus {
1109    /// Extract the failure status from an `ExitStatus`.
1110    pub fn extract(exit_status: ExitStatus) -> Self {
1111        if let Some(abort_status) = AbortStatus::extract(exit_status) {
1112            FailureStatus::Abort(abort_status)
1113        } else {
1114            FailureStatus::ExitCode(
1115                exit_status
1116                    .code()
1117                    .expect("if abort_status is None, then code must be present"),
1118            )
1119        }
1120    }
1121}
1122
/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
///
/// `Debug` is implemented by hand (below) to render each variant compactly.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    ///
    /// Carries the raw signal number.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}
1140
1141impl AbortStatus {
1142    /// Extract the abort status from an [`ExitStatus`].
1143    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
1144        cfg_if::cfg_if! {
1145            if #[cfg(unix)] {
1146                // On Unix, extract the signal if it's found.
1147                use std::os::unix::process::ExitStatusExt;
1148                exit_status.signal().map(AbortStatus::UnixSignal)
1149            } else if #[cfg(windows)] {
1150                exit_status.code().and_then(|code| {
1151                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
1152                })
1153            } else {
1154                None
1155            }
1156        }
1157    }
1158}
1159
1160impl fmt::Debug for AbortStatus {
1161    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1162        match self {
1163            #[cfg(unix)]
1164            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
1165            #[cfg(windows)]
1166            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
1167            #[cfg(windows)]
1168            AbortStatus::JobObject => write!(f, "JobObject"),
1169        }
1170    }
1171}
1172
// Note: the order here matters -- it indicates severity of cancellation
/// The reason why a test run is being cancelled.
///
/// The derived `Ord` follows declaration order, so later variants compare as
/// more severe than earlier ones.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A test failed and fail-fast with immediate termination was specified.
    TestFailureImmediate,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
1202
1203impl CancelReason {
1204    pub(crate) fn to_static_str(self) -> &'static str {
1205        match self {
1206            CancelReason::SetupScriptFailure => "setup script failure",
1207            CancelReason::TestFailure => "test failure",
1208            CancelReason::ReportError => "reporting error",
1209            CancelReason::GlobalTimeout => "global timeout",
1210            CancelReason::TestFailureImmediate => "test failure",
1211            CancelReason::Signal => "signal",
1212            CancelReason::Interrupt => "interrupt",
1213            CancelReason::SecondSignal => "second signal",
1214        }
1215    }
1216}
/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}
1226
1227impl UnitKind {
1228    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
1229    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";
1230
1231    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
1232    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";
1233
1234    pub(crate) fn waiting_on_message(&self) -> &'static str {
1235        match self {
1236            UnitKind::Test => Self::WAITING_ON_TEST_MESSAGE,
1237            UnitKind::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
1238        }
1239    }
1240
1241    pub(crate) fn executing_message(&self) -> &'static str {
1242        match self {
1243            UnitKind::Test => Self::EXECUTING_TEST_MESSAGE,
1244            UnitKind::Script => Self::EXECUTING_SCRIPT_MESSAGE,
1245        }
1246    }
1247}
1248
1249impl fmt::Display for UnitKind {
1250    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1251        match self {
1252            UnitKind::Script => write!(f, "script"),
1253            UnitKind::Test => write!(f, "test"),
1254        }
1255    }
1256}
1257
/// A response to an information request.
///
/// Wraps either a setup script's or a test's response.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
1267
/// A setup script's response to an information request.
///
/// Part of [`InfoResponse`].
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The stress index of the setup script.
    ///
    /// Presumably `None` outside of stress runs -- confirm against the runner.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The state of the setup script.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}
1289
/// A test's response to an information request.
///
/// Part of [`InfoResponse`].
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test.
    ///
    /// Presumably `None` outside of stress runs -- confirm against the runner.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}
1308
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The unit has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
1386
1387impl UnitState {
1388    /// Returns true if the state has a valid output attached to it.
1389    pub fn has_valid_output(&self) -> bool {
1390        match self {
1391            UnitState::Running { .. }
1392            | UnitState::Exiting { .. }
1393            | UnitState::Terminating(_)
1394            | UnitState::Exited { .. } => true,
1395            UnitState::DelayBeforeNextAttempt { .. } => false,
1396        }
1397    }
1398}
1399
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
1423
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}
1438
1439impl fmt::Display for UnitTerminateReason {
1440    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1441        match self {
1442            UnitTerminateReason::Timeout => write!(f, "timeout"),
1443            UnitTerminateReason::Signal => write!(f, "signal"),
1444            UnitTerminateReason::Interrupt => write!(f, "interrupt"),
1445        }
1446    }
1447}
1448
/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing. Only available in test builds.
    #[cfg(test)]
    Fake,
}
1474
#[cfg(unix)]
/// The signal that is or was sent to terminate a script or test.
///
/// Part of [`UnitTerminateMethod::Signal`]; Unix-only.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}
1494
1495#[cfg(unix)]
1496impl fmt::Display for UnitTerminateSignal {
1497    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1498        match self {
1499            UnitTerminateSignal::Interrupt => write!(f, "SIGINT"),
1500            UnitTerminateSignal::Term => write!(f, "SIGTERM"),
1501            UnitTerminateSignal::Hangup => write!(f, "SIGHUP"),
1502            UnitTerminateSignal::Quit => write!(f, "SIGQUIT"),
1503            UnitTerminateSignal::Kill => write!(f, "SIGKILL"),
1504        }
1505    }
1506}
1507
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks `RunStats::summarize_final` across success, cancellation and
    /// failure scenarios, for both tests and setup scripts.
    ///
    /// (Renamed from `test_is_success`: this exercises `summarize_final`, not
    /// `ExecutionResult::is_success`.)
    #[test]
    fn test_summarize_final() {
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: 42,
                    not_run: 1
                }
            },
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        // Setup scripts: an unfinished script means the run was cancelled.
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::SetupScript,
            },
            "setup scripts not all finished => cancelled"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}