nextest_runner/reporter/events.rs

// Copyright (c) The nextest Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Events for the reporter.
//!
//! These types form the interface between the test runner and the test
//! reporter. The root structure for all events is [`TestEvent`].
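//!
//! # Example
//!
//! A minimal sketch of a reporter-side consumer matching on event kinds. The block
//! is marked `ignore` because the public import path shown here is assumed, not
//! verified:
//!
//! ```ignore
//! use nextest_runner::reporter::events::{TestEvent, TestEventKind};
//!
//! fn log_event(event: &TestEvent<'_>) {
//!     match &event.kind {
//!         TestEventKind::RunStarted { .. } => {
//!             println!("run started at {}", event.timestamp);
//!         }
//!         TestEventKind::RunFinished { elapsed, .. } => {
//!             println!("run finished in {elapsed:?}");
//!         }
//!         // Other event kinds are ignored in this sketch.
//!         _ => {}
//!     }
//! }
//! ```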

use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
use crate::{
    config::{elements::LeakTimeoutResult, scripts::ScriptId},
    list::{TestInstance, TestInstanceId, TestList},
    runner::{StressCondition, StressCount},
    test_output::ChildExecutionOutput,
};
use chrono::{DateTime, FixedOffset};
use nextest_metadata::MismatchReason;
use quick_junit::ReportUuid;
use std::{collections::BTreeMap, fmt, num::NonZero, process::ExitStatus, time::Duration};

/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}

/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run, or overall statistics for stress tests.
        run_stats: RunFinishedStats,
    },
}

/// Progress for a stress test.
#[derive(Clone, Debug)]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs.
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}

impl StressProgress {
    /// Returns the remaining amount of work if the progress indicates there's
    /// still more to do, otherwise `None`.
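    ///
    /// # Example
    ///
    /// A minimal sketch, marked `ignore` because the public import path shown is
    /// assumed rather than verified:
    ///
    /// ```ignore
    /// use nextest_runner::reporter::events::{StressProgress, StressRemaining};
    /// use std::time::Duration;
    ///
    /// // A 10-second stress run with 4 seconds elapsed has 6 seconds remaining.
    /// let progress = StressProgress::Time {
    ///     total: Duration::from_secs(10),
    ///     elapsed: Duration::from_secs(4),
    ///     completed: 2,
    /// };
    /// assert!(matches!(
    ///     progress.remaining(),
    ///     Some(StressRemaining::Time(d)) if d == Duration::from_secs(6)
    /// ));
    /// ```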
    pub fn remaining(&self) -> Option<StressRemaining> {
        match self {
            Self::Count {
                total: StressCount::Count(total),
                elapsed: _,
                completed,
            } => total
                .get()
                .checked_sub(*completed)
                .and_then(|remaining| NonZero::try_from(remaining).ok())
                .map(StressRemaining::Count),
            Self::Count {
                total: StressCount::Infinite,
                ..
            } => Some(StressRemaining::Infinite),
            Self::Time {
                total,
                elapsed,
                completed: _,
            } => total.checked_sub(*elapsed).map(StressRemaining::Time),
        }
    }
}

/// For a stress test, the amount of time or number of stress runs remaining.
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}

/// The index of the current stress run.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    pub total: Option<NonZero<u32>>,
}

/// Statistics for a completed test run or stress run.
#[derive(Clone, Debug)]
pub enum RunFinishedStats {
    /// A single test run was completed.
    Single(RunStats),

    /// A stress run was completed.
    Stress(StressRunStats),
}

impl RunFinishedStats {
    /// For a single run, returns a summary of statistics as an enum. For a
    /// stress run, returns a summary for the last sub-run.
    pub fn final_stats(&self) -> FinalRunStats {
        match self {
            Self::Single(stats) => stats.summarize_final(),
            Self::Stress(stats) => stats.last_final_stats,
        }
    }
}

/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `setup_scripts_finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of tests that timed out.
    pub timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,

    /// If the run is cancelled, the reason the cancellation is happening.
    pub cancel_reason: Option<CancelReason>,
}

impl RunStats {
    /// Returns true if there are any failures recorded in the stats.
    pub fn has_failures(&self) -> bool {
        self.failed_setup_script_count() > 0 || self.failed_count() > 0
    }

    /// Returns count of setup scripts that did not pass.
    pub fn failed_setup_script_count(&self) -> usize {
        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
    }

    /// Returns count of tests that did not pass.
    pub fn failed_count(&self) -> usize {
        self.failed + self.exec_failed + self.timed_out
    }

    /// Summarizes the stats as an enum at the end of a test run.
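    ///
    /// # Example
    ///
    /// A minimal sketch, marked `ignore` because the public import path shown is
    /// assumed rather than verified:
    ///
    /// ```ignore
    /// use nextest_runner::reporter::events::{FinalRunStats, RunStats};
    ///
    /// // All scheduled tests finished and none failed, so the run is a success.
    /// let stats = RunStats {
    ///     initial_run_count: 8,
    ///     finished_count: 8,
    ///     passed: 8,
    ///     ..RunStats::default()
    /// };
    /// assert_eq!(stats.summarize_final(), FinalRunStats::Success);
    /// ```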
    pub fn summarize_final(&self) -> FinalRunStats {
        // Check for failures first. The order of setup scripts vs tests should
        // not be important, though we don't assert that here.
        if self.failed_setup_script_count() > 0 {
            // Is this related to a cancellation other than one directly caused
            // by the failure?
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind: RunStatsFailureKind::SetupScript,
                }
            } else {
                FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
            }
        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::SetupScript,
            }
        } else if self.failed_count() > 0 {
            let kind = RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            };

            // Is this related to a cancellation other than one directly caused
            // by the failure?
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind,
                }
            } else {
                FinalRunStats::Failed(kind)
            }
        } else if self.initial_run_count > self.finished_count {
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: self.initial_run_count,
                    not_run: self.initial_run_count.saturating_sub(self.finished_count),
                },
            }
        } else if self.finished_count == 0 {
            FinalRunStats::NoTestsRun
        } else {
            FinalRunStats::Success
        }
    }

    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
        self.setup_scripts_finished_count += 1;

        match status.result {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.setup_scripts_passed += 1;
            }
            ExecutionResult::Fail { .. }
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.setup_scripts_failed += 1;
            }
            ExecutionResult::ExecFail => {
                self.setup_scripts_exec_failed += 1;
            }
            ExecutionResult::Timeout => {
                self.setup_scripts_timed_out += 1;
            }
        }
    }

    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
        self.finished_count += 1;
        // run_statuses is guaranteed to have at least one element.
        // * If the last element is success, treat it as success (and possibly flaky).
        // * If the last element is a failure, use it to determine fail/exec fail.
        // Note that this is different from what Maven Surefire does (use the first failure):
        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
        //
        // This is not likely to matter much in practice since failures are likely to be of the
        // same type.
        let last_status = run_statuses.last_status();
        match last_status.result {
            ExecutionResult::Pass => {
                self.passed += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.leaky += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.failed += 1;
                self.leaky_failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Fail { .. } => {
                self.failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Timeout => self.timed_out += 1,
            ExecutionResult::ExecFail => self.exec_failed += 1,
        }
    }
}

/// A type summarizing the possible outcomes of a test run.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled {
        /// The reason for cancellation, if available.
        ///
        /// This should generally be available, but may be None if some tests
        /// that were selected to run were not executed.
        reason: Option<CancelReason>,

        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}

/// Statistics for a stress run.
#[derive(Clone, Debug)]
pub struct StressRunStats {
    /// The number of stress runs completed.
    pub completed: StressIndex,

    /// The number of stress runs that succeeded.
    pub success_count: u32,

    /// The number of stress runs that failed.
    pub failed_count: u32,

    /// The last stress run's `FinalRunStats`.
    pub last_final_stats: FinalRunStats,
}

impl StressRunStats {
    /// Summarizes the stats as an enum at the end of a test run.
    pub fn summarize_final(&self) -> StressFinalRunStats {
        if self.failed_count > 0 {
            StressFinalRunStats::Failed
        } else if matches!(self.last_final_stats, FinalRunStats::Cancelled { .. }) {
            StressFinalRunStats::Cancelled
        } else if matches!(self.last_final_stats, FinalRunStats::NoTestsRun) {
            StressFinalRunStats::NoTestsRun
        } else {
            StressFinalRunStats::Success
        }
    }
}

/// A summary of final statistics for a stress run.
pub enum StressFinalRunStats {
    /// The stress run was successful.
    Success,

    /// No tests were run.
    NoTestsRun,

    /// The stress run was cancelled.
    Cancelled,

    /// At least one stress run failed.
    Failed,
}

/// A type summarizing the step at which a test run failed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests that were not run, or, while the run is still in
        /// progress, the number of tests still queued up to run.
        not_run: usize,
    },
}

/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// This is guaranteed to be non-empty.
    statuses: Vec<ExecuteStatus>,
}

#[expect(clippy::len_without_is_empty)] // ExecutionStatuses is never empty
impl ExecutionStatuses {
    pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
        Self { statuses }
    }

    /// Returns the last execution status.
    ///
    /// This status is typically used as the final result.
    pub fn last_status(&self) -> &ExecuteStatus {
        self.statuses
            .last()
            .expect("execution statuses is non-empty")
    }

    /// Iterates over all the statuses.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
        self.statuses.iter()
    }

    /// Returns the number of times the test was executed.
    pub fn len(&self) -> usize {
        self.statuses.len()
    }

    /// Returns a description of these statuses: whether the test passed, was flaky, or failed.
    pub fn describe(&self) -> ExecutionDescription<'_> {
        let last_status = self.last_status();
        if last_status.result.is_success() {
            if self.statuses.len() > 1 {
                ExecutionDescription::Flaky {
                    last_status,
                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
                }
            } else {
                ExecutionDescription::Success {
                    single_status: last_status,
                }
            }
        } else {
            let first_status = self
                .statuses
                .first()
                .expect("execution statuses is non-empty");
            let retries = &self.statuses[1..];
            ExecutionDescription::Failure {
                first_status,
                last_status,
                retries,
            }
        }
    }
}

/// A description of test executions obtained from `ExecutionStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
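///
/// # Example
///
/// A minimal sketch of classifying a description, marked `ignore` because the public
/// import path shown is assumed rather than verified:
///
/// ```ignore
/// use nextest_runner::reporter::events::ExecutionDescription;
///
/// fn outcome_label(desc: &ExecutionDescription<'_>) -> &'static str {
///     match desc {
///         ExecutionDescription::Success { .. } => "passed",
///         ExecutionDescription::Flaky { .. } => "flaky",
///         ExecutionDescription::Failure { .. } => "failed",
///     }
/// }
/// ```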
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}

impl<'a> ExecutionDescription<'a> {
    /// Returns the status level for this `ExecutionDescription`.
    pub fn status_level(&self) -> StatusLevel {
        match self {
            ExecutionDescription::Success { single_status } => match single_status.result {
                ExecutionResult::Leak {
                    result: LeakTimeoutResult::Pass,
                } => StatusLevel::Leak,
                ExecutionResult::Pass => StatusLevel::Pass,
                other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
            },
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
        }
    }

    /// Returns the final status level for this `ExecutionDescription`.
    pub fn final_status_level(&self) -> FinalStatusLevel {
        match self {
            ExecutionDescription::Success { single_status, .. } => {
                // Slow is higher priority than leaky, so return slow first here.
                if single_status.is_slow {
                    FinalStatusLevel::Slow
                } else {
                    match single_status.result {
                        ExecutionResult::Pass => FinalStatusLevel::Pass,
                        ExecutionResult::Leak {
                            result: LeakTimeoutResult::Pass,
                        } => FinalStatusLevel::Leak,
                        other => {
                            unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
                        }
                    }
                }
            }
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
        }
    }

    /// Returns the last run status.
    pub fn last_status(&self) -> &'a ExecuteStatus {
        match self {
            ExecutionDescription::Success {
                single_status: last_status,
            }
            | ExecutionDescription::Flaky { last_status, .. }
            | ExecutionDescription::Failure { last_status, .. } => last_status,
        }
    }
}

/// Information about a single execution of a test.
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay before the test started. Non-zero if this is a retry and a delay was configured.
    pub delay_before_start: Duration,
}

/// Information about the execution of a setup script.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}

/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script.
    pub env_map: BTreeMap<String, String>,
}

/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: usize,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: usize,
}

impl RetryData {
    /// Returns true if there are no more attempts after this.
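    ///
    /// # Example
    ///
    /// A minimal sketch, marked `ignore` because the public import path shown is
    /// assumed rather than verified:
    ///
    /// ```ignore
    /// use nextest_runner::reporter::events::RetryData;
    ///
    /// // Attempt 3 of 3: no further attempts remain.
    /// assert!(RetryData { attempt: 3, total_attempts: 3 }.is_last_attempt());
    /// // Attempt 1 of 3: more attempts may follow.
    /// assert!(!RetryData { attempt: 1, total_attempts: 3 }.is_last_attempt());
    /// ```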
    pub fn is_last_attempt(&self) -> bool {
        self.attempt >= self.total_attempts
    }
}

/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherits standard I/O was created but didn't shut down when
    /// the test finished.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true }` and
        /// `Leak { result: LeakTimeoutResult::Fail }`. In the former case, the test failed
        /// and also leaked handles. In the latter case, the test passed but leaked handles,
        /// and configuration indicated that this is a failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherits standard I/O was created but didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout,
}

impl ExecutionResult {
    /// Returns true if the test was successful.
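    ///
    /// # Example
    ///
    /// A minimal sketch, marked `ignore` because the public import path shown is
    /// assumed rather than verified:
    ///
    /// ```ignore
    /// use nextest_runner::reporter::events::ExecutionResult;
    ///
    /// assert!(ExecutionResult::Pass.is_success());
    /// assert!(!ExecutionResult::Timeout.is_success());
    /// assert!(!ExecutionResult::ExecFail.is_success());
    /// ```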
    pub fn is_success(self) -> bool {
        match self {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => true,
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            }
            | ExecutionResult::Fail { .. }
            | ExecutionResult::ExecFail
            | ExecutionResult::Timeout => false,
        }
    }
}

/// Failure status: either an exit code or an abort status.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted.
    Abort(AbortStatus),
}

impl FailureStatus {
    /// Extract the failure status from an `ExitStatus`.
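    ///
    /// # Example
    ///
    /// A minimal sketch for Unix, marked `ignore` because the public import path shown
    /// is assumed rather than verified:
    ///
    /// ```ignore
    /// use nextest_runner::reporter::events::{AbortStatus, FailureStatus};
    /// use std::os::unix::process::ExitStatusExt;
    /// use std::process::ExitStatus;
    ///
    /// // A raw Unix wait status of 9 corresponds to termination by SIGKILL.
    /// let status = ExitStatus::from_raw(9);
    /// assert_eq!(
    ///     FailureStatus::extract(status),
    ///     FailureStatus::Abort(AbortStatus::UnixSignal(9)),
    /// );
    /// ```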
    pub fn extract(exit_status: ExitStatus) -> Self {
        if let Some(abort_status) = AbortStatus::extract(exit_status) {
            FailureStatus::Abort(abort_status)
        } else {
            FailureStatus::ExitCode(
                exit_status
                    .code()
                    .expect("if abort_status is None, then code must be present"),
            )
        }
    }
}

/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}

impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                // On Unix, extract the signal if it's found.
                use std::os::unix::process::ExitStatusExt;
                exit_status.signal().map(AbortStatus::UnixSignal)
            } else if #[cfg(windows)] {
                exit_status.code().and_then(|code| {
                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
                })
            } else {
                None
            }
        }
    }
}

impl fmt::Debug for AbortStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
            #[cfg(windows)]
            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
            #[cfg(windows)]
            AbortStatus::JobObject => write!(f, "JobObject"),
        }
    }
}

// Note: the order here matters -- it indicates severity of cancellation
/// The reason why a test run is being cancelled.
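///
/// # Example
///
/// Variants are ordered by severity, which callers rely on when comparing reasons. A
/// minimal sketch, marked `ignore` because the public import path shown is assumed
/// rather than verified:
///
/// ```ignore
/// use nextest_runner::reporter::events::CancelReason;
///
/// // An interrupt is a more severe cancellation reason than a test failure.
/// assert!(CancelReason::Interrupt > CancelReason::TestFailure);
/// ```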
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}

impl CancelReason {
    pub(crate) fn to_static_str(self) -> &'static str {
        match self {
            CancelReason::SetupScriptFailure => "setup script failure",
            CancelReason::TestFailure => "test failure",
            CancelReason::ReportError => "reporting error",
            CancelReason::GlobalTimeout => "global timeout",
            CancelReason::Signal => "signal",
            CancelReason::Interrupt => "interrupt",
            CancelReason::SecondSignal => "second signal",
        }
    }
}

/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}

impl UnitKind {
    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";

    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";

    pub(crate) fn waiting_on_message(&self) -> &'static str {
        match self {
            UnitKind::Test => Self::WAITING_ON_TEST_MESSAGE,
            UnitKind::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
        }
    }

    pub(crate) fn executing_message(&self) -> &'static str {
        match self {
            UnitKind::Test => Self::EXECUTING_TEST_MESSAGE,
            UnitKind::Script => Self::EXECUTING_SCRIPT_MESSAGE,
        }
    }
}

impl fmt::Display for UnitKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            UnitKind::Script => write!(f, "script"),
            UnitKind::Test => write!(f, "test"),
        }
    }
}

/// A response to an information request.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}

/// A setup script's response to an information request.
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The stress index of the setup script.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The state of the setup script.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}

/// A test's response to an information request.
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}

/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}

impl UnitState {
    /// Returns true if the state has a valid output attached to it.
    pub fn has_valid_output(&self) -> bool {
        match self {
            UnitState::Running { .. }
            | UnitState::Exiting { .. }
            | UnitState::Terminating(_)
            | UnitState::Exited { .. } => true,
            UnitState::DelayBeforeNextAttempt { .. } => false,
        }
    }
}

/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}

/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}

impl fmt::Display for UnitTerminateReason {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            UnitTerminateReason::Timeout => write!(f, "timeout"),
            UnitTerminateReason::Signal => write!(f, "signal"),
            UnitTerminateReason::Interrupt => write!(f, "interrupt"),
        }
    }
}

/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing.
    #[cfg(test)]
    Fake,
}

#[cfg(unix)]
/// The signal that is or was sent to terminate a script or test.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}

#[cfg(unix)]
impl fmt::Display for UnitTerminateSignal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            UnitTerminateSignal::Interrupt => write!(f, "SIGINT"),
            UnitTerminateSignal::Term => write!(f, "SIGTERM"),
            UnitTerminateSignal::Hangup => write!(f, "SIGHUP"),
            UnitTerminateSignal::Quit => write!(f, "SIGQUIT"),
            UnitTerminateSignal::Kill => write!(f, "SIGKILL"),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_is_success() {
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: 42,
                    not_run: 1
                }
            },
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::SetupScript,
            },
            "setup scripts not all finished => cancelled"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}