nextest_runner/reporter/
events.rs

1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Events for the reporter.
5//!
6//! These types form the interface between the test runner and the test
7//! reporter. The root structure for all events is [`TestEvent`].
8
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10use crate::{
11    config::{elements::LeakTimeoutResult, scripts::ScriptId},
12    list::{TestInstance, TestInstanceId, TestList},
13    runner::{StressCondition, StressCount},
14    test_output::ChildExecutionOutput,
15};
16use chrono::{DateTime, FixedOffset};
17use nextest_metadata::MismatchReason;
18use quick_junit::ReportUuid;
19use std::{collections::BTreeMap, fmt, num::NonZero, process::ExitStatus, time::Duration};
20
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
///
/// The lifetime `'a` is tied to borrowed data in [`TestEventKind`], such as
/// the [`TestList`] carried by [`TestEventKind::RunStarted`].
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}
36
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
///
/// Many variants carry a `stress_index` field, which is `Some` only when a
/// stress test is being run.
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,
    },

    /// Show test-specific progress.
    ///
    /// This event is emitted shortly after a test starts running, and is used
    /// as a trigger to show test-related progress. This is to prevent many very
    /// short-lived tests from overwhelming the display code.
    TestShowProgress {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// Retry data.
        retry_data: RetryData,

        /// The test instance that was started.
        test_instance: TestInstance<'a>,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run, or overall statistics for stress tests.
        run_stats: RunFinishedStats,
    },
}
384
/// Progress for a stress test.
///
/// Carried by [`TestEventKind::StressSubRunStarted`] and
/// [`TestEventKind::StressSubRunFinished`].
#[derive(Clone, Debug)]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs.
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}
412
413impl StressProgress {
414    /// Returns the remaining amount of work if the progress indicates there's
415    /// still more to do, otherwise `None`.
416    pub fn remaining(&self) -> Option<StressRemaining> {
417        match self {
418            Self::Count {
419                total: StressCount::Count(total),
420                elapsed: _,
421                completed,
422            } => total
423                .get()
424                .checked_sub(*completed)
425                .and_then(|remaining| NonZero::try_from(remaining).ok())
426                .map(StressRemaining::Count),
427            Self::Count {
428                total: StressCount::Infinite,
429                ..
430            } => Some(StressRemaining::Infinite),
431            Self::Time {
432                total,
433                elapsed,
434                completed: _,
435            } => total.checked_sub(*elapsed).map(StressRemaining::Time),
436        }
437    }
438
439    /// Returns a unique ID for this stress sub-run, consisting of the run ID and stress index.
440    pub fn unique_id(&self, run_id: ReportUuid) -> String {
441        let stress_current = match self {
442            Self::Count { completed, .. } | Self::Time { completed, .. } => *completed,
443        };
444        format!("{}:@stress-{}", run_id, stress_current)
445    }
446}
447
/// For a stress test, the amount of time or number of stress runs remaining.
///
/// Returned by [`StressProgress::remaining`].
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}
460
/// The index of the current stress run.
///
/// Carried as the `stress_index` field on many [`TestEventKind`] variants.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    pub total: Option<NonZero<u32>>,
}
470
/// Statistics for a completed test run or stress run.
///
/// Carried by [`TestEventKind::RunFinished`].
#[derive(Clone, Debug)]
pub enum RunFinishedStats {
    /// A single test run was completed.
    Single(RunStats),

    /// A stress run was completed.
    Stress(StressRunStats),
}
480
481impl RunFinishedStats {
482    /// For a single run, returns a summary of statistics as an enum. For a
483    /// stress run, returns a summary for the last sub-run.
484    pub fn final_stats(&self) -> FinalRunStats {
485        match self {
486            Self::Single(stats) => stats.summarize_final(),
487            Self::Stress(stats) => stats.last_final_stats,
488        }
489    }
490}
491
/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than
    /// `setup_scripts_finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of tests that timed out.
    pub timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,

    /// If the run is cancelled, the reason the cancellation is happening.
    pub cancel_reason: Option<CancelReason>,
}
557
558impl RunStats {
559    /// Returns true if there are any failures recorded in the stats.
560    pub fn has_failures(&self) -> bool {
561        self.failed_setup_script_count() > 0 || self.failed_count() > 0
562    }
563
564    /// Returns count of setup scripts that did not pass.
565    pub fn failed_setup_script_count(&self) -> usize {
566        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
567    }
568
569    /// Returns count of tests that did not pass.
570    pub fn failed_count(&self) -> usize {
571        self.failed + self.exec_failed + self.timed_out
572    }
573
574    /// Summarizes the stats as an enum at the end of a test run.
575    pub fn summarize_final(&self) -> FinalRunStats {
576        // Check for failures first. The order of setup scripts vs tests should
577        // not be important, though we don't assert that here.
578        if self.failed_setup_script_count() > 0 {
579            // Is this related to a cancellation other than one directly caused
580            // by the failure?
581            if self.cancel_reason > Some(CancelReason::TestFailure) {
582                FinalRunStats::Cancelled {
583                    reason: self.cancel_reason,
584                    kind: RunStatsFailureKind::SetupScript,
585                }
586            } else {
587                FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
588            }
589        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
590            FinalRunStats::Cancelled {
591                reason: self.cancel_reason,
592                kind: RunStatsFailureKind::SetupScript,
593            }
594        } else if self.failed_count() > 0 {
595            let kind = RunStatsFailureKind::Test {
596                initial_run_count: self.initial_run_count,
597                not_run: self.initial_run_count.saturating_sub(self.finished_count),
598            };
599
600            // Is this related to a cancellation other than one directly caused
601            // by the failure?
602            if self.cancel_reason > Some(CancelReason::TestFailure) {
603                FinalRunStats::Cancelled {
604                    reason: self.cancel_reason,
605                    kind,
606                }
607            } else {
608                FinalRunStats::Failed(kind)
609            }
610        } else if self.initial_run_count > self.finished_count {
611            FinalRunStats::Cancelled {
612                reason: self.cancel_reason,
613                kind: RunStatsFailureKind::Test {
614                    initial_run_count: self.initial_run_count,
615                    not_run: self.initial_run_count.saturating_sub(self.finished_count),
616                },
617            }
618        } else if self.finished_count == 0 {
619            FinalRunStats::NoTestsRun
620        } else {
621            FinalRunStats::Success
622        }
623    }
624
625    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
626        self.setup_scripts_finished_count += 1;
627
628        match status.result {
629            ExecutionResult::Pass
630            | ExecutionResult::Leak {
631                result: LeakTimeoutResult::Pass,
632            } => {
633                self.setup_scripts_passed += 1;
634            }
635            ExecutionResult::Fail { .. }
636            | ExecutionResult::Leak {
637                result: LeakTimeoutResult::Fail,
638            } => {
639                self.setup_scripts_failed += 1;
640            }
641            ExecutionResult::ExecFail => {
642                self.setup_scripts_exec_failed += 1;
643            }
644            ExecutionResult::Timeout => {
645                self.setup_scripts_timed_out += 1;
646            }
647        }
648    }
649
650    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
651        self.finished_count += 1;
652        // run_statuses is guaranteed to have at least one element.
653        // * If the last element is success, treat it as success (and possibly flaky).
654        // * If the last element is a failure, use it to determine fail/exec fail.
655        // Note that this is different from what Maven Surefire does (use the first failure):
656        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
657        //
658        // This is not likely to matter much in practice since failures are likely to be of the
659        // same type.
660        let last_status = run_statuses.last_status();
661        match last_status.result {
662            ExecutionResult::Pass => {
663                self.passed += 1;
664                if last_status.is_slow {
665                    self.passed_slow += 1;
666                }
667                if run_statuses.len() > 1 {
668                    self.flaky += 1;
669                }
670            }
671            ExecutionResult::Leak {
672                result: LeakTimeoutResult::Pass,
673            } => {
674                self.passed += 1;
675                self.leaky += 1;
676                if last_status.is_slow {
677                    self.passed_slow += 1;
678                }
679                if run_statuses.len() > 1 {
680                    self.flaky += 1;
681                }
682            }
683            ExecutionResult::Leak {
684                result: LeakTimeoutResult::Fail,
685            } => {
686                self.failed += 1;
687                self.leaky_failed += 1;
688                if last_status.is_slow {
689                    self.failed_slow += 1;
690                }
691            }
692            ExecutionResult::Fail { .. } => {
693                self.failed += 1;
694                if last_status.is_slow {
695                    self.failed_slow += 1;
696                }
697            }
698            ExecutionResult::Timeout => self.timed_out += 1,
699            ExecutionResult::ExecFail => self.exec_failed += 1,
700        }
701    }
702}
703
/// A type summarizing the possible outcomes of a test run.
///
/// Produced by [`RunStats::summarize_final`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled {
        /// The reason for cancellation, if available.
        ///
        /// This should generally be available, but may be None if some tests
        /// that were selected to run were not executed.
        reason: Option<CancelReason>,

        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}
728
/// Statistics for a stress run.
#[derive(Clone, Debug)]
pub struct StressRunStats {
    /// The completed stress-run progress: the current count, plus the total
    /// number of runs if known (see [`StressIndex`]).
    pub completed: StressIndex,

    /// The number of stress runs that succeeded.
    pub success_count: u32,

    /// The number of stress runs that failed.
    pub failed_count: u32,

    /// The last stress run's `FinalRunStats`.
    pub last_final_stats: FinalRunStats,
}
744
745impl StressRunStats {
746    /// Summarizes the stats as an enum at the end of a test run.
747    pub fn summarize_final(&self) -> StressFinalRunStats {
748        if self.failed_count > 0 {
749            StressFinalRunStats::Failed
750        } else if matches!(self.last_final_stats, FinalRunStats::Cancelled { .. }) {
751            StressFinalRunStats::Cancelled
752        } else if matches!(self.last_final_stats, FinalRunStats::NoTestsRun) {
753            StressFinalRunStats::NoTestsRun
754        } else {
755            StressFinalRunStats::Success
756        }
757    }
758}
759
/// A summary of final statistics for a stress run.
///
/// Produced by [`StressRunStats::summarize_final`].
//
// Derives added for consistency with the sibling summary types
// (`FinalRunStats`, `RunStatsFailureKind`), which are all `Copy` fieldless or
// small enums with `Debug`/`Eq` support.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum StressFinalRunStats {
    /// The stress run was successful.
    Success,

    /// No tests were run.
    NoTestsRun,

    /// The stress run was cancelled.
    Cancelled,

    /// At least one stress run failed.
    Failed,
}
774
/// A type summarizing the step at which a test run failed.
///
/// Forms part of [`FinalRunStats`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
791
/// Information about executions of a test, including retries.
///
/// A summary view is available via [`ExecutionStatuses::describe`].
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// This is guaranteed to be non-empty.
    statuses: Vec<ExecuteStatus>,
}
798
799#[expect(clippy::len_without_is_empty)] // RunStatuses is never empty
800impl ExecutionStatuses {
801    pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
802        Self { statuses }
803    }
804
805    /// Returns the last execution status.
806    ///
807    /// This status is typically used as the final result.
808    pub fn last_status(&self) -> &ExecuteStatus {
809        self.statuses
810            .last()
811            .expect("execution statuses is non-empty")
812    }
813
814    /// Iterates over all the statuses.
815    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
816        self.statuses.iter()
817    }
818
819    /// Returns the number of times the test was executed.
820    pub fn len(&self) -> usize {
821        self.statuses.len()
822    }
823
824    /// Returns a description of self.
825    pub fn describe(&self) -> ExecutionDescription<'_> {
826        let last_status = self.last_status();
827        if last_status.result.is_success() {
828            if self.statuses.len() > 1 {
829                ExecutionDescription::Flaky {
830                    last_status,
831                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
832                }
833            } else {
834                ExecutionDescription::Success {
835                    single_status: last_status,
836                }
837            }
838        } else {
839            let first_status = self
840                .statuses
841                .first()
842                .expect("execution statuses is non-empty");
843            let retries = &self.statuses[1..];
844            ExecutionDescription::Failure {
845                first_status,
846                last_status,
847                retries,
848            }
849        }
850    }
851}
852
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
///
/// Created by [`ExecutionStatuses::describe`].
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}
887
888impl<'a> ExecutionDescription<'a> {
889    /// Returns the status level for this `ExecutionDescription`.
890    pub fn status_level(&self) -> StatusLevel {
891        match self {
892            ExecutionDescription::Success { single_status } => match single_status.result {
893                ExecutionResult::Leak {
894                    result: LeakTimeoutResult::Pass,
895                } => StatusLevel::Leak,
896                ExecutionResult::Pass => StatusLevel::Pass,
897                other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
898            },
899            // A flaky test implies that we print out retry information for it.
900            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
901            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
902        }
903    }
904
905    /// Returns the final status level for this `ExecutionDescription`.
906    pub fn final_status_level(&self) -> FinalStatusLevel {
907        match self {
908            ExecutionDescription::Success { single_status, .. } => {
909                // Slow is higher priority than leaky, so return slow first here.
910                if single_status.is_slow {
911                    FinalStatusLevel::Slow
912                } else {
913                    match single_status.result {
914                        ExecutionResult::Pass => FinalStatusLevel::Pass,
915                        ExecutionResult::Leak {
916                            result: LeakTimeoutResult::Pass,
917                        } => FinalStatusLevel::Leak,
918                        other => {
919                            unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
920                        }
921                    }
922                }
923            }
924            // A flaky test implies that we print out retry information for it.
925            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
926            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
927        }
928    }
929
930    /// Returns the last run status.
931    pub fn last_status(&self) -> &'a ExecuteStatus {
932        match self {
933            ExecutionDescription::Success {
934                single_status: last_status,
935            }
936            | ExecutionDescription::Flaky { last_status, .. }
937            | ExecutionDescription::Failure { last_status, .. } => last_status,
938        }
939    }
940}
941
/// Information about a single execution of a test.
///
/// One test may produce several of these if it is retried; see [`RetryData`].
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data: the current attempt and the total attempt count.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay will be non-zero if this is a retry and delay was specified.
    pub delay_before_start: Duration,
}
960
/// Information about the execution of a setup script.
///
/// The setup-script analogue of [`ExecuteStatus`]; setup scripts have no
/// retry data, but do produce an environment map on success.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}
985
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script, keyed by variable
    /// name. A `BTreeMap` keeps iteration order deterministic.
    pub env_map: BTreeMap<String, String>,
}
994
/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: u32,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: u32,
}

impl RetryData {
    /// Returns true if there are no more attempts after this.
    pub fn is_last_attempt(&self) -> bool {
        // `attempt` should never exceed `total_attempts`, but compare with a
        // range check rather than strict equality to be defensive.
        !(self.attempt < self.total_attempts)
    }
}
1011
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherited standard IO was created, but it didn't shut down when
    /// the test exited.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true }` and `Leak {
        /// result: LeakTimeoutResult::Fail }`. In the former case, the test
        /// failed and also leaked handles. In the latter case, the test passed
        /// but leaked handles, and configuration indicated that this is a
        /// failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherited standard IO was created, but it didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout,
}
1044
1045impl ExecutionResult {
1046    /// Returns true if the test was successful.
1047    pub fn is_success(self) -> bool {
1048        match self {
1049            ExecutionResult::Pass
1050            | ExecutionResult::Leak {
1051                result: LeakTimeoutResult::Pass,
1052            } => true,
1053            ExecutionResult::Leak {
1054                result: LeakTimeoutResult::Fail,
1055            }
1056            | ExecutionResult::Fail { .. }
1057            | ExecutionResult::ExecFail
1058            | ExecutionResult::Timeout => false,
1059        }
1060    }
1061
1062    /// Returns a static string representation of the result.
1063    pub fn as_static_str(&self) -> &'static str {
1064        match self {
1065            ExecutionResult::Pass => "pass",
1066            ExecutionResult::Leak { .. } => "leak",
1067            ExecutionResult::Fail { .. } => "fail",
1068            ExecutionResult::ExecFail => "exec-fail",
1069            ExecutionResult::Timeout => "timeout",
1070        }
1071    }
1072}
1073
/// Failure status: either an exit code or an abort status.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted: see [`AbortStatus`] for the platform-specific detail
    /// (Unix signal, Windows NT status, or job object termination).
    Abort(AbortStatus),
}
1083
1084impl FailureStatus {
1085    /// Extract the failure status from an `ExitStatus`.
1086    pub fn extract(exit_status: ExitStatus) -> Self {
1087        if let Some(abort_status) = AbortStatus::extract(exit_status) {
1088            FailureStatus::Abort(abort_status)
1089        } else {
1090            FailureStatus::ExitCode(
1091                exit_status
1092                    .code()
1093                    .expect("if abort_status is None, then code must be present"),
1094            )
1095        }
1096    }
1097}
1098
/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    ///
    /// The payload is the signal number that terminated the process.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}
1116
impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    ///
    /// Returns `None` if no abort information is available (e.g. the process
    /// exited with a regular exit code).
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                // On Unix, extract the signal if it's found.
                use std::os::unix::process::ExitStatusExt;
                exit_status.signal().map(AbortStatus::UnixSignal)
            } else if #[cfg(windows)] {
                // On Windows, a negative exit code means the high bit is set,
                // which is interpreted as an NT status (abort).
                exit_status.code().and_then(|code| {
                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
                })
            } else {
                // Other platforms: no abort information is available.
                None
            }
        }
    }
}
1135
// Manual Debug impl: NT statuses are conventionally shown in hex, which
// `#[derive(Debug)]` would not do.
impl fmt::Debug for AbortStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
            #[cfg(windows)]
            // `{status:x}` renders the NT status in hexadecimal.
            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
            #[cfg(windows)]
            AbortStatus::JobObject => write!(f, "JobObject"),
        }
    }
}
1148
// Note: the order here matters -- it indicates severity of cancellation.
// The derived `Ord` relies on declaration order, so later variants compare
// as more severe.
/// The reason why a test run is being cancelled.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
1175
1176impl CancelReason {
1177    pub(crate) fn to_static_str(self) -> &'static str {
1178        match self {
1179            CancelReason::SetupScriptFailure => "setup script failure",
1180            CancelReason::TestFailure => "test failure",
1181            CancelReason::ReportError => "reporting error",
1182            CancelReason::GlobalTimeout => "global timeout",
1183            CancelReason::Signal => "signal",
1184            CancelReason::Interrupt => "interrupt",
1185            CancelReason::SecondSignal => "second signal",
1186        }
1187    }
1188}
/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}

impl UnitKind {
    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";

    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";

    /// Returns the "waiting on ..." message for this kind of unit.
    pub(crate) fn waiting_on_message(&self) -> &'static str {
        match self {
            Self::Test => Self::WAITING_ON_TEST_MESSAGE,
            Self::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
        }
    }

    /// Returns the "executing ..." message for this kind of unit.
    pub(crate) fn executing_message(&self) -> &'static str {
        match self {
            Self::Test => Self::EXECUTING_TEST_MESSAGE,
            Self::Script => Self::EXECUTING_SCRIPT_MESSAGE,
        }
    }
}

impl fmt::Display for UnitKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            UnitKind::Script => "script",
            UnitKind::Test => "test",
        };
        f.write_str(text)
    }
}
1229
/// A response to an information request.
///
/// One variant per [`UnitKind`]: setup scripts and tests carry different
/// identifying data.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
1239
/// A setup script's response to an information request.
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The stress index of the setup script, if any.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The state of the setup script: running, exiting, terminating, exited,
    /// or waiting between attempts.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}
1261
/// A test's response to an information request.
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test, if any.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries: the current attempt and total attempt count.
    pub retry_data: RetryData,

    /// The state of the test: running, exiting, terminating, exited, or
    /// waiting between attempts.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}
1280
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    ///
    /// See [`UnitTerminatingState`] for the termination reason and method.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    ///
    /// In this state there is no live process, so no output is available; see
    /// `has_valid_output`.
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
1358
1359impl UnitState {
1360    /// Returns true if the state has a valid output attached to it.
1361    pub fn has_valid_output(&self) -> bool {
1362        match self {
1363            UnitState::Running { .. }
1364            | UnitState::Exiting { .. }
1365            | UnitState::Terminating(_)
1366            | UnitState::Exited { .. } => true,
1367            UnitState::DelayBeforeNextAttempt { .. } => false,
1368        }
1369    }
1370}
1371
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
1395
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}

impl fmt::Display for UnitTerminateReason {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            UnitTerminateReason::Timeout => "timeout",
            UnitTerminateReason::Signal => "signal",
            UnitTerminateReason::Interrupt => "interrupt",
        };
        f.write_str(text)
    }
}
1420
/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing. Only compiled into test builds.
    #[cfg(test)]
    Fake,
}
1446
/// The signal that is or was sent to terminate a script or test.
#[cfg(unix)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}

#[cfg(unix)]
impl fmt::Display for UnitTerminateSignal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render each variant as the conventional signal name.
        let name = match self {
            UnitTerminateSignal::Interrupt => "SIGINT",
            UnitTerminateSignal::Term => "SIGTERM",
            UnitTerminateSignal::Hangup => "SIGHUP",
            UnitTerminateSignal::Quit => "SIGQUIT",
            UnitTerminateSignal::Kill => "SIGKILL",
        };
        f.write_str(name)
    }
}
1479
#[cfg(test)]
mod tests {
    use super::*;

    // Exercises `RunStats::summarize_final` across the matrix of outcomes:
    // empty runs, complete/incomplete runs, per-test failure kinds, and
    // setup-script failure kinds.
    #[test]
    fn test_is_success() {
        // No tests at all => NoTestsRun, not Success.
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        // Fewer finished than started => the run was cancelled partway.
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: 42,
                    not_run: 1
                }
            },
            "initial run count > final run count => cancelled"
        );
        // Each failure counter (failed / exec_failed / timed_out) individually
        // turns a complete run into a failure.
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        // Skipped tests do not count against success.
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        // Setup-script scenarios: incomplete scripts cancel the run; failing
        // scripts fail it; passing scripts with no tests => NoTestsRun.
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::SetupScript,
            },
            "setup script failed => failure"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}