nextest_runner/reporter/
events.rs

1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Events for the reporter.
5//!
6//! These types form the interface between the test runner and the test
7//! reporter. The root structure for all events is [`TestEvent`].
8
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10#[cfg(test)]
11use crate::output_spec::ArbitraryOutputSpec;
12use crate::{
13    config::{
14        elements::{
15            FlakyResult, JunitFlakyFailStatus, LeakTimeoutResult, SlowTimeoutResult, TestGroup,
16        },
17        scripts::ScriptId,
18    },
19    errors::{ChildError, ChildFdError, ChildStartError, ErrorList},
20    list::{OwnedTestInstanceId, TestInstanceId, TestList},
21    output_spec::{LiveSpec, OutputSpec, SerializableOutputSpec},
22    runner::{StressCondition, StressCount},
23    test_output::{ChildExecutionOutput, ChildOutput, ChildSingleOutput},
24};
25use chrono::{DateTime, FixedOffset};
26use nextest_metadata::MismatchReason;
27use quick_junit::ReportUuid;
28use serde::{Deserialize, Serialize};
29use smol_str::SmolStr;
30use std::{
31    collections::BTreeMap, ffi::c_int, fmt, num::NonZero, process::ExitStatus, time::Duration,
32};
33
/// The signal number for SIGTERM.
///
/// This is 15 on all platforms (it matches the POSIX-assigned number). We
/// define it here rather than using `SIGTERM` because `SIGTERM` is not
/// available on Windows, but the value is platform-independent.
pub const SIGTERM: c_int = 15;
39
/// A reporter event.
///
/// Either a periodic tick or a test event produced by the runner.
#[derive(Clone, Debug)]
pub enum ReporterEvent<'a> {
    /// A periodic tick.
    Tick,

    /// A test event.
    ///
    /// Boxed to keep `ReporterEvent` itself small; [`TestEvent`] is a much
    /// larger payload than `Tick`.
    Test(Box<TestEvent<'a>>),
}
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    ///
    /// See [`TestEventKind`] for the full set of variants.
    pub kind: TestEventKind<'a>,
}
64
/// Scheduling information about a test's slot and group assignment.
///
/// This information is assigned by the `future_queue` scheduler and remains
/// constant across all retry attempts of the same test (the same value is
/// carried by both `TestStarted` and `TestRetryStarted` events).
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct TestSlotAssignment {
    /// The global slot number assigned to this test. Compact: starts from 0
    /// and is always the smallest available number at assignment time.
    pub global_slot: u64,

    /// The slot number within this test's group, if the test is in a custom
    /// group. `None` for tests in the global group.
    pub group_slot: Option<u64>,

    /// The test group this test belongs to.
    pub test_group: TestGroup,
}
83
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
///
/// A run begins with [`RunStarted`](Self::RunStarted) and ends with
/// [`RunFinished`](Self::RunFinished); the remaining variants report
/// per-setup-script and per-test progress, plus run-wide control events
/// (cancellation, pause/continue, info requests) in between.
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any. `None` for a regular
        /// (non-stress) run.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: Vec<String>,

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: Vec<String>,

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: Vec<String>,

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus<LiveSpec>,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstanceId<'a>,

        /// Scheduling information (slot and group assignment).
        slot_assignment: TestSlotAssignment,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,

        /// The command line that will be used to run this test.
        command_line: Vec<String>,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstanceId<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstanceId<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus<LiveSpec>,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,

        /// The current number of running tests.
        running: usize,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstanceId<'a>,

        /// Scheduling information (slot and group assignment). Same as the
        /// initial `TestStarted` event for this test.
        slot_assignment: TestSlotAssignment,

        /// Data related to retries.
        retry_data: RetryData,

        /// The current number of running tests.
        running: usize,

        /// The command line that will be used to run this test.
        command_line: Vec<String>,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstanceId<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// How flaky-fail tests should be reported in JUnit.
        junit_flaky_fail_status: JunitFlakyFailStatus,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses<LiveSpec>,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstanceId<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run, or overall statistics for stress tests.
        run_stats: RunFinishedStats,

        /// Tests that were expected to run but were not seen during this run.
        ///
        /// This is only set for reruns when some tests from the outstanding set
        /// did not produce any events.
        outstanding_not_seen: Option<TestsNotSeen>,
    },
}
443
/// Tests that were expected to run but were not seen during a rerun.
///
/// Carried by [`TestEventKind::RunFinished`] via `outstanding_not_seen`.
#[derive(Clone, Debug)]
pub struct TestsNotSeen {
    /// A sample of test instance IDs that were not seen, up to a reasonable
    /// limit.
    ///
    /// This uses [`OwnedTestInstanceId`] rather than [`TestInstanceId`]
    /// because the tests may not be present in the current test list (they
    /// come from the expected outstanding set from a prior run).
    pub not_seen: Vec<OwnedTestInstanceId>,

    /// The total number of tests not seen (may exceed `not_seen.len()` when
    /// the sample above was truncated).
    pub total_not_seen: usize,
}
458
/// Progress for a stress test.
///
/// Both variants track the number of completed sub-runs and the total elapsed
/// time; they differ in whether the stress budget is a count or a duration.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "progress-type", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs.
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}
488
489impl StressProgress {
490    /// Returns the remaining amount of work if the progress indicates there's
491    /// still more to do, otherwise `None`.
492    pub fn remaining(&self) -> Option<StressRemaining> {
493        match self {
494            Self::Count {
495                total: StressCount::Count { count },
496                elapsed: _,
497                completed,
498            } => count
499                .get()
500                .checked_sub(*completed)
501                .and_then(|remaining| NonZero::try_from(remaining).ok())
502                .map(StressRemaining::Count),
503            Self::Count {
504                total: StressCount::Infinite,
505                ..
506            } => Some(StressRemaining::Infinite),
507            Self::Time {
508                total,
509                elapsed,
510                completed: _,
511            } => total.checked_sub(*elapsed).map(StressRemaining::Time),
512        }
513    }
514
515    /// Returns a unique ID for this stress sub-run, consisting of the run ID and stress index.
516    pub fn unique_id(&self, run_id: ReportUuid) -> String {
517        let stress_current = match self {
518            Self::Count { completed, .. } | Self::Time { completed, .. } => *completed,
519        };
520        format!("{}:@stress-{}", run_id, stress_current)
521    }
522}
523
/// For a stress test, the amount of time or number of stress runs remaining.
///
/// Returned by [`StressProgress::remaining`].
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}
536
/// The index of the current stress run.
///
/// Attached to per-script and per-test events (e.g.
/// [`TestEventKind::TestStarted`]) when a stress test is in progress.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    pub total: Option<NonZero<u32>>,
}
548
549impl StressIndex {
550    /// Returns the total as a plain `u32`, if available.
551    pub fn total_get(&self) -> Option<u32> {
552        self.total.map(|t| t.get())
553    }
554}
555
/// Statistics for a completed test run or stress run.
///
/// Carried by [`TestEventKind::RunFinished`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum RunFinishedStats {
    /// A single test run was completed.
    Single(RunStats),

    /// A stress run was completed.
    Stress(StressRunStats),
}
567
568impl RunFinishedStats {
569    /// For a single run, returns a summary of statistics as an enum. For a
570    /// stress run, returns a summary for the last sub-run.
571    pub fn final_stats(&self) -> FinalRunStats {
572        match self {
573            Self::Single(stats) => stats.summarize_final(),
574            Self::Stress(stats) => stats.last_final_stats,
575        }
576    }
577}
578
/// Statistics for a test run.
///
/// Counters are cumulative over the run; the aggregate helpers
/// (`failed_count`, `failed_setup_script_count`, `summarize_final`) are
/// defined on the corresponding `impl` block.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `passed_timed_out`, `flaky`, and
    /// `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of timed out tests that passed.
    pub passed_timed_out: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed. Includes `leaky_failed` and tests that
    /// were flaky but treated as failed due to `flaky-result = "fail"` configuration.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of timed out tests that failed.
    pub failed_timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    ///
    /// Included in `failed`.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,

    /// If the run is cancelled, the reason the cancellation is happening.
    pub cancel_reason: Option<CancelReason>,
}
653
impl RunStats {
    /// Returns true if there are any failures recorded in the stats.
    ///
    /// Covers both setup-script failures and test failures.
    pub fn has_failures(&self) -> bool {
        self.failed_setup_script_count() > 0 || self.failed_count() > 0
    }

    /// Returns count of setup scripts that did not pass.
    ///
    /// Sum of failed, exec-failed, and timed-out setup scripts.
    pub fn failed_setup_script_count(&self) -> usize {
        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
    }

    /// Returns count of tests that did not pass.
    ///
    /// Sum of failed, exec-failed, and timed-out tests. (`failed` already
    /// includes `leaky_failed`, so it is not added separately.)
    pub fn failed_count(&self) -> usize {
        self.failed + self.exec_failed + self.failed_timed_out
    }

    /// Summarizes the stats as an enum at the end of a test run.
    ///
    /// Precedence: setup-script failures, then incomplete setup scripts, then
    /// test failures, then incomplete tests, then the empty run, then success.
    pub fn summarize_final(&self) -> FinalRunStats {
        // Check for failures first. The order of setup scripts vs tests should
        // not be important, though we don't assert that here.
        if self.failed_setup_script_count() > 0 {
            // Is this related to a cancellation other than one directly caused
            // by the failure?
            //
            // NOTE(review): this comparison relies on `Option`'s `Ord`
            // (`None < Some(_)`) and on `CancelReason`'s variant ordering,
            // which is defined elsewhere in the crate — assumed to place
            // more severe reasons after `TestFailure`.
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind: RunStatsFailureKind::SetupScript,
                }
            } else {
                FinalRunStats::Failed {
                    kind: RunStatsFailureKind::SetupScript,
                }
            }
        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
            // Some setup scripts never finished: the run must have been
            // cancelled before they could complete.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::SetupScript,
            }
        } else if self.failed_count() > 0 {
            let kind = RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            };

            // Is this related to a cancellation other than one directly caused
            // by the failure?
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind,
                }
            } else {
                FinalRunStats::Failed { kind }
            }
        } else if self.initial_run_count > self.finished_count {
            // No failures, but some tests never ran: cancelled.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: self.initial_run_count,
                    not_run: self.initial_run_count.saturating_sub(self.finished_count),
                },
            }
        } else if self.finished_count == 0 {
            FinalRunStats::NoTestsRun
        } else {
            FinalRunStats::Success
        }
    }

    /// Records a finished setup script, bumping exactly one of the
    /// pass/fail/exec-fail/timeout counters based on its result.
    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus<LiveSpec>) {
        self.setup_scripts_finished_count += 1;

        match status.result {
            // A leak with a `Pass` leak-timeout result still counts as a pass.
            ExecutionResultDescription::Pass
            | ExecutionResultDescription::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.setup_scripts_passed += 1;
            }
            ExecutionResultDescription::Fail { .. }
            | ExecutionResultDescription::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.setup_scripts_failed += 1;
            }
            ExecutionResultDescription::ExecFail => {
                self.setup_scripts_exec_failed += 1;
            }
            // Timed out setup scripts are always treated as failures.
            ExecutionResultDescription::Timeout { .. } => {
                self.setup_scripts_timed_out += 1;
            }
        }
    }

    /// Records a finished test (all of its attempts), updating the
    /// pass/fail/flaky/leaky/slow/timeout counters.
    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses<LiveSpec>) {
        self.finished_count += 1;
        // run_statuses is guaranteed to have at least one element.
        // * If the last element is success, treat it as success (and possibly flaky).
        // * If the last element is a failure, use it to determine fail/exec fail.
        // Note that this is different from what Maven Surefire does (use the first failure):
        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
        //
        // This is not likely to matter much in practice since failures are likely to be of the
        // same type.
        let last_status = run_statuses.last_status();
        match last_status.result {
            ExecutionResultDescription::Pass => {
                // The test is flaky if there were multiple attempts. How
                // it's counted depends on flaky_result — match
                // exhaustively so the compiler catches new variants.
                let is_flaky = run_statuses.len() > 1;
                if is_flaky {
                    match run_statuses.flaky_result() {
                        FlakyResult::Fail => {
                            self.failed += 1;
                            if last_status.is_slow {
                                self.failed_slow += 1;
                            }
                        }
                        FlakyResult::Pass => {
                            self.passed += 1;
                            if last_status.is_slow {
                                self.passed_slow += 1;
                            }
                            self.flaky += 1;
                        }
                    }
                } else {
                    // Single attempt that passed: the common case.
                    self.passed += 1;
                    if last_status.is_slow {
                        self.passed_slow += 1;
                    }
                }
            }
            ExecutionResultDescription::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                // Leak treated as pass: same flaky logic as `Pass`, plus the
                // `leaky` counter.
                let is_flaky = run_statuses.len() > 1;
                if is_flaky {
                    match run_statuses.flaky_result() {
                        FlakyResult::Fail => {
                            self.failed += 1;
                            if last_status.is_slow {
                                self.failed_slow += 1;
                            }
                            // Still count as leaky since the leak was detected.
                            self.leaky += 1;
                        }
                        FlakyResult::Pass => {
                            self.passed += 1;
                            self.leaky += 1;
                            if last_status.is_slow {
                                self.passed_slow += 1;
                            }
                            self.flaky += 1;
                        }
                    }
                } else {
                    self.passed += 1;
                    self.leaky += 1;
                    if last_status.is_slow {
                        self.passed_slow += 1;
                    }
                }
            }
            ExecutionResultDescription::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                // Leak configured to fail the test: counted in both `failed`
                // and `leaky_failed` (the struct docs note this inclusion).
                self.failed += 1;
                self.leaky_failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResultDescription::Fail { .. } => {
                self.failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResultDescription::Timeout {
                result: SlowTimeoutResult::Pass,
            } => {
                // Timeout treated as pass.
                let is_flaky = run_statuses.len() > 1;
                if is_flaky {
                    match run_statuses.flaky_result() {
                        FlakyResult::Fail => {
                            self.failed += 1;
                            // Track as failed_slow since the overall result
                            // is failure.
                            //
                            // NOTE(review): unlike the timeout-fail arm below,
                            // this path does not bump `failed_timed_out` —
                            // confirm whether that is intentional.
                            if last_status.is_slow {
                                self.failed_slow += 1;
                            }
                        }
                        FlakyResult::Pass => {
                            self.passed += 1;
                            self.passed_timed_out += 1;
                            self.flaky += 1;
                        }
                    }
                } else {
                    self.passed += 1;
                    self.passed_timed_out += 1;
                }
            }
            ExecutionResultDescription::Timeout {
                result: SlowTimeoutResult::Fail,
            } => {
                // Counted via `failed_timed_out` only; `failed_count()` adds
                // this to `failed` when aggregating.
                self.failed_timed_out += 1;
            }
            ExecutionResultDescription::ExecFail => self.exec_failed += 1,
        }
    }
}
869
/// A type summarizing the possible outcomes of a test run.
///
/// Serialized as an internally tagged enum: the `outcome` field holds the
/// kebab-cased variant name.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(tag = "outcome", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled {
        /// The reason for cancellation, if available.
        ///
        /// This should generally be available, but may be None if some tests
        /// that were selected to run were not executed.
        reason: Option<CancelReason>,

        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },

    /// At least one test failed.
    Failed {
        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },
}
899
/// Statistics for a stress run.
///
/// Aggregates results across the individual runs of a stress session; see
/// [`StressRunStats::summarize_final`] for the overall outcome.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct StressRunStats {
    /// The number of stress runs completed.
    pub completed: StressIndex,

    /// The number of stress runs that succeeded.
    pub success_count: u32,

    /// The number of stress runs that failed.
    pub failed_count: u32,

    /// The last stress run's `FinalRunStats`.
    ///
    /// Used to distinguish cancellation and no-tests-run outcomes when no
    /// run failed outright.
    pub last_final_stats: FinalRunStats,
}
917
918impl StressRunStats {
919    /// Summarizes the stats as an enum at the end of a test run.
920    pub fn summarize_final(&self) -> StressFinalRunStats {
921        if self.failed_count > 0 {
922            StressFinalRunStats::Failed
923        } else if matches!(self.last_final_stats, FinalRunStats::Cancelled { .. }) {
924            StressFinalRunStats::Cancelled
925        } else if matches!(self.last_final_stats, FinalRunStats::NoTestsRun) {
926            StressFinalRunStats::NoTestsRun
927        } else {
928            StressFinalRunStats::Success
929        }
930    }
931}
932
/// A summary of final statistics for a stress run.
///
/// Returned by [`StressRunStats::summarize_final`].
//
// Derives added for consistency with the other summary types in this file
// (e.g. `FinalRunStats`), which all implement Copy/Clone/Debug/PartialEq/Eq.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum StressFinalRunStats {
    /// The stress run was successful.
    Success,

    /// No tests were run.
    NoTestsRun,

    /// The stress run was cancelled.
    Cancelled,

    /// At least one stress run failed.
    Failed,
}
947
/// A type summarizing the step at which a test run failed.
///
/// Serialized as an internally tagged enum: the `step` field holds the
/// kebab-cased variant name.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(tag = "step", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
966
/// Information about executions of a test, including retries.
///
/// The type parameter `S` specifies how test output is stored (see
/// [`OutputSpec`]).
#[derive_where::derive_where(Clone, Debug, PartialEq, Eq; S::ChildOutputDesc)]
#[derive(Serialize)]
#[serde(
    rename_all = "kebab-case",
    bound(serialize = "S: SerializableOutputSpec")
)]
#[cfg_attr(
    test,
    derive(test_strategy::Arbitrary),
    arbitrary(bound(S: ArbitraryOutputSpec))
)]
pub struct ExecutionStatuses<S: OutputSpec> {
    /// This is guaranteed to be non-empty.
    ///
    /// The invariant is checked by `new` (debug assertion) and enforced as a
    /// hard error by the manual `Deserialize` implementation below.
    #[cfg_attr(test, strategy(proptest::collection::vec(proptest::arbitrary::any::<ExecuteStatus<S>>(), 1..=3)))]
    statuses: Vec<ExecuteStatus<S>>,

    /// Controls whether a flaky test is treated as a pass or a failure.
    ///
    /// Falls back to `FlakyResult::default()` when absent from serialized
    /// input (`#[serde(default)]`).
    #[serde(default)]
    flaky_result: FlakyResult,
}
991
992impl<'de, S: SerializableOutputSpec> Deserialize<'de> for ExecutionStatuses<S> {
993    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
994        // Deserialize as the wrapper struct that matches the Serialize output.
995        // S is already bound as SerializableOutputSpec on this impl.
996        #[derive(Deserialize)]
997        #[serde(
998            rename_all = "kebab-case",
999            bound(deserialize = "S: SerializableOutputSpec")
1000        )]
1001        struct Helper<S: OutputSpec> {
1002            statuses: Vec<ExecuteStatus<S>>,
1003            #[serde(default)]
1004            flaky_result: FlakyResult,
1005        }
1006
1007        let helper = Helper::<S>::deserialize(deserializer)?;
1008        if helper.statuses.is_empty() {
1009            return Err(serde::de::Error::custom("expected non-empty statuses"));
1010        }
1011        Ok(Self {
1012            statuses: helper.statuses,
1013            flaky_result: helper.flaky_result,
1014        })
1015    }
1016}
1017
1018#[expect(clippy::len_without_is_empty)] // RunStatuses is never empty
1019impl<S: OutputSpec> ExecutionStatuses<S> {
1020    pub(crate) fn new(statuses: Vec<ExecuteStatus<S>>, flaky_result: FlakyResult) -> Self {
1021        debug_assert!(!statuses.is_empty(), "ExecutionStatuses must be non-empty");
1022        Self {
1023            statuses,
1024            flaky_result,
1025        }
1026    }
1027
1028    /// Returns the configured flaky result for this test.
1029    pub fn flaky_result(&self) -> FlakyResult {
1030        self.flaky_result
1031    }
1032
1033    /// Returns the last execution status.
1034    ///
1035    /// This status is typically used as the final result.
1036    pub fn last_status(&self) -> &ExecuteStatus<S> {
1037        self.statuses
1038            .last()
1039            .expect("execution statuses is non-empty")
1040    }
1041
1042    /// Iterates over all the statuses.
1043    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus<S>> + '_ {
1044        self.statuses.iter()
1045    }
1046
1047    /// Returns the number of times the test was executed.
1048    pub fn len(&self) -> usize {
1049        self.statuses.len()
1050    }
1051
1052    /// Returns a description of self.
1053    pub fn describe(&self) -> ExecutionDescription<'_, S> {
1054        let last_status = self.last_status();
1055        if last_status.result.is_success() {
1056            if self.statuses.len() > 1 {
1057                ExecutionDescription::Flaky {
1058                    last_status,
1059                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
1060                    result: self.flaky_result,
1061                }
1062            } else {
1063                ExecutionDescription::Success {
1064                    single_status: last_status,
1065                }
1066            }
1067        } else {
1068            let first_status = self
1069                .statuses
1070                .first()
1071                .expect("execution statuses is non-empty");
1072            let retries = &self.statuses[1..];
1073            ExecutionDescription::Failure {
1074                first_status,
1075                last_status,
1076                retries,
1077            }
1078        }
1079    }
1080}
1081
// Consuming iteration yields each `ExecuteStatus` by value, in storage order
// (matching `iter()`).
impl<S: OutputSpec> IntoIterator for ExecutionStatuses<S> {
    type Item = ExecuteStatus<S>;
    type IntoIter = std::vec::IntoIter<ExecuteStatus<S>>;

    fn into_iter(self) -> Self::IntoIter {
        self.statuses.into_iter()
    }
}
1090
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
///
/// Obtained via [`ExecutionStatuses::describe`].
///
/// The type parameter `S` specifies how test output is stored (see
/// [`OutputSpec`]).
#[derive_where::derive_where(Debug; S::ChildOutputDesc)]
pub enum ExecutionDescription<'a, S: OutputSpec> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus<S>,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus<S>,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus<S>],

        /// Controls whether this flaky test is treated as a pass or a failure.
        result: FlakyResult,
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus<S>,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus<S>,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus<S>],
    },
}
1131
// Manual Copy and Clone implementations to avoid requiring S::ChildOutputDesc:
// Copy/Clone, since ExecutionDescription only stores references.
impl<S: OutputSpec> Clone for ExecutionDescription<'_, S> {
    fn clone(&self) -> Self {
        // Delegates to Copy: every variant holds only shared references
        // and a Copy `FlakyResult`.
        *self
    }
}

impl<S: OutputSpec> Copy for ExecutionDescription<'_, S> {}
1141
1142impl<'a, S: OutputSpec> ExecutionDescription<'a, S> {
1143    /// Returns the status level for this `ExecutionDescription`.
1144    pub fn status_level(&self) -> StatusLevel {
1145        match self {
1146            ExecutionDescription::Success { single_status } => match single_status.result {
1147                ExecutionResultDescription::Leak {
1148                    result: LeakTimeoutResult::Pass,
1149                } => StatusLevel::Leak,
1150                ExecutionResultDescription::Pass => StatusLevel::Pass,
1151                ExecutionResultDescription::Timeout {
1152                    result: SlowTimeoutResult::Pass,
1153                } => StatusLevel::Slow,
1154                ref other => unreachable!(
1155                    "Success only permits Pass, Leak Pass, or Timeout Pass, found {other:?}"
1156                ),
1157            },
1158            // A flaky test implies that we print out retry information for it.
1159            ExecutionDescription::Flaky {
1160                result: FlakyResult::Pass,
1161                ..
1162            } => StatusLevel::Retry,
1163            ExecutionDescription::Flaky {
1164                result: FlakyResult::Fail,
1165                ..
1166            } => StatusLevel::Fail,
1167            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
1168        }
1169    }
1170
1171    /// Returns the final status level for this `ExecutionDescription`.
1172    pub fn final_status_level(&self) -> FinalStatusLevel {
1173        match self {
1174            ExecutionDescription::Success { single_status, .. } => {
1175                // Slow is higher priority than leaky, so return slow first here.
1176                if single_status.is_slow {
1177                    FinalStatusLevel::Slow
1178                } else {
1179                    match single_status.result {
1180                        ExecutionResultDescription::Pass => FinalStatusLevel::Pass,
1181                        ExecutionResultDescription::Leak {
1182                            result: LeakTimeoutResult::Pass,
1183                        } => FinalStatusLevel::Leak,
1184                        // Timeout with Pass should return Slow, but this case
1185                        // shouldn't be reached because is_slow is true for
1186                        // timeout scenarios. Handle it for completeness.
1187                        ExecutionResultDescription::Timeout {
1188                            result: SlowTimeoutResult::Pass,
1189                        } => FinalStatusLevel::Slow,
1190                        ref other => unreachable!(
1191                            "Success only permits Pass, Leak Pass, or Timeout Pass, found {other:?}"
1192                        ),
1193                    }
1194                }
1195            }
1196            // A flaky-pass test implies that we print out retry information.
1197            ExecutionDescription::Flaky {
1198                result: FlakyResult::Pass,
1199                ..
1200            } => FinalStatusLevel::Flaky,
1201            // A flaky-fail test is treated as a failure.
1202            ExecutionDescription::Flaky {
1203                result: FlakyResult::Fail,
1204                ..
1205            } => FinalStatusLevel::Fail,
1206            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
1207        }
1208    }
1209
1210    /// Returns whether this test's output should be treated as success output
1211    /// for display and storage purposes.
1212    ///
1213    /// For flaky tests (both pass and fail variants), the last attempt
1214    /// succeeded, so its output is success output: it contains no panics or
1215    /// errors, and is generally not interesting. The failure information comes
1216    /// from the status line and from prior retry attempts' output (shown via
1217    /// `TestAttemptFailedWillRetry` events, controlled by `failure-output`).
1218    ///
1219    /// This means:
1220    /// - The _visibility_ is controlled by `success-output`.
1221    /// - The _styling_ is pass/green headers, no error extraction.
1222    /// - _JUnit storage_ is controlled by `store-success-output` (default:
1223    ///   `false`).
1224    ///
1225    /// The status line uses failure semantics independently (e.g. `FLKY-FL` in
1226    /// red for flaky-fail tests).
1227    pub fn is_success_for_output(&self) -> bool {
1228        match self {
1229            ExecutionDescription::Success { .. } => true,
1230            // All flaky tests have a successful last attempt — the output
1231            // from that attempt is success output regardless of the overall
1232            // test outcome.
1233            ExecutionDescription::Flaky { .. } => true,
1234            ExecutionDescription::Failure { .. } => false,
1235        }
1236    }
1237
1238    /// Returns the last run status.
1239    pub fn last_status(&self) -> &'a ExecuteStatus<S> {
1240        match self {
1241            ExecutionDescription::Success {
1242                single_status: last_status,
1243            }
1244            | ExecutionDescription::Flaky { last_status, .. }
1245            | ExecutionDescription::Failure { last_status, .. } => last_status,
1246        }
1247    }
1248}
1249
/// Pre-computed error summary for display.
///
/// This contains the formatted error messages, pre-computed from the execution
/// output and result. Useful for record-replay scenarios where the rendering
/// is done on the server.
///
/// Both fields are already rendered, so consumers can display them verbatim.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct ErrorSummary {
    /// A short summary of the error, suitable for display in a single line.
    pub short_message: String,

    /// A full description of the error chain, suitable for detailed display.
    pub description: String,
}
1265
/// Pre-computed output error slice for display.
///
/// This contains an error message heuristically extracted from test output,
/// such as a panic message or error string.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct OutputErrorSlice {
    /// The extracted error slice as a string.
    pub slice: String,

    /// The byte offset in the original output where this slice starts.
    ///
    /// Allows consumers to locate the slice within the full captured output.
    pub start: usize,
}
1280
/// Information about a single execution of a test.
///
/// This is the external-facing type used by reporters. The `result` field uses
/// [`ExecutionResultDescription`], a platform-independent type that can be
/// serialized and deserialized across platforms.
///
/// The type parameter `S` specifies how test output is stored (see
/// [`OutputSpec`]).
///
/// Fields are serialized in kebab-case; serialization in either direction
/// requires `S: SerializableOutputSpec`.
#[derive_where::derive_where(Clone, Debug, PartialEq, Eq; S::ChildOutputDesc)]
#[derive(Serialize, Deserialize)]
#[serde(
    rename_all = "kebab-case",
    bound(
        serialize = "S: SerializableOutputSpec",
        deserialize = "S: SerializableOutputSpec"
    )
)]
#[cfg_attr(
    test,
    derive(test_strategy::Arbitrary),
    arbitrary(bound(S: ArbitraryOutputSpec))
)]
pub struct ExecuteStatus<S: OutputSpec> {
    /// Retry-related data.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutputDescription<S>,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResultDescription,
    /// The time at which the test started, including the offset from UTC.
    #[cfg_attr(
        test,
        strategy(crate::reporter::test_helpers::arb_datetime_fixed_offset())
    )]
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    #[cfg_attr(test, strategy(crate::reporter::test_helpers::arb_duration()))]
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay will be non-zero if this is a retry and delay was specified.
    #[cfg_attr(test, strategy(crate::reporter::test_helpers::arb_duration()))]
    pub delay_before_start: Duration,
    /// Pre-computed error summary, if available.
    ///
    /// This is computed from the execution output and result, and can be used
    /// for display without needing to re-compute the error chain.
    pub error_summary: Option<ErrorSummary>,
    /// Pre-computed output error slice, if available.
    ///
    /// This is a heuristically extracted error message from the test output,
    /// such as a panic message or error string.
    pub output_error_slice: Option<OutputErrorSlice>,
}
1335
/// Information about the execution of a setup script.
///
/// This is the external-facing type used by reporters. The `result` field uses
/// [`ExecutionResultDescription`], a platform-independent type that can be
/// serialized and deserialized across platforms.
///
/// The type parameter `S` specifies how test output is stored (see
/// [`OutputSpec`]).
///
/// Fields are serialized in kebab-case; serialization in either direction
/// requires `S: SerializableOutputSpec`.
#[derive_where::derive_where(Clone, Debug, PartialEq, Eq; S::ChildOutputDesc)]
#[derive(Serialize, Deserialize)]
#[serde(
    rename_all = "kebab-case",
    bound(
        serialize = "S: SerializableOutputSpec",
        deserialize = "S: SerializableOutputSpec"
    )
)]
#[cfg_attr(
    test,
    derive(test_strategy::Arbitrary),
    arbitrary(bound(S: ArbitraryOutputSpec))
)]
pub struct SetupScriptExecuteStatus<S: OutputSpec> {
    /// Output for this setup script.
    pub output: ChildExecutionOutputDescription<S>,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResultDescription,

    /// The time at which the script started, including the offset from UTC.
    #[cfg_attr(
        test,
        strategy(crate::reporter::test_helpers::arb_datetime_fixed_offset())
    )]
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    #[cfg_attr(test, strategy(crate::reporter::test_helpers::arb_duration()))]
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,

    /// Pre-computed error summary, if available.
    ///
    /// This is computed from the execution output and result, and can be used
    /// for display without needing to re-compute the error chain.
    pub error_summary: Option<ErrorSummary>,
}
1391
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script.
    ///
    /// Stored in a `BTreeMap`, so entries are ordered by variable name.
    pub env_map: BTreeMap<String, String>,
}
1402
1403// ---
1404// Child execution output description types
1405// ---
1406
/// The result of executing a child process, generic over output storage.
///
/// This is the external-facing counterpart to [`ChildExecutionOutput`]. The
/// type parameter `S` specifies how output is stored (see [`OutputSpec`]).
///
/// Serialized as an internally tagged enum: the `type` field holds the
/// kebab-cased variant name.
#[derive_where::derive_where(Clone, Debug, PartialEq, Eq; S::ChildOutputDesc)]
#[derive(Serialize, Deserialize)]
#[serde(
    tag = "type",
    rename_all = "kebab-case",
    bound(
        serialize = "S: SerializableOutputSpec",
        deserialize = "S: SerializableOutputSpec"
    )
)]
#[cfg_attr(
    test,
    derive(test_strategy::Arbitrary),
    arbitrary(bound(S: ArbitraryOutputSpec))
)]
pub enum ChildExecutionOutputDescription<S: OutputSpec> {
    /// The process was run and the output was captured.
    Output {
        /// If the process has finished executing, the final state it is in.
        ///
        /// `None` means execution is currently in progress.
        result: Option<ExecutionResultDescription>,

        /// The captured output.
        output: S::ChildOutputDesc,

        /// Errors that occurred while waiting on the child process or parsing
        /// its output.
        errors: Option<ErrorList<ChildErrorDescription>>,
    },

    /// There was a failure to start the process.
    StartError(ChildStartErrorDescription),
}
1445
1446impl<S: OutputSpec> ChildExecutionOutputDescription<S> {
1447    /// Returns true if there are any errors in this output.
1448    pub fn has_errors(&self) -> bool {
1449        match self {
1450            Self::Output { errors, result, .. } => {
1451                if errors.is_some() {
1452                    return true;
1453                }
1454                if let Some(result) = result {
1455                    return !result.is_success();
1456                }
1457                false
1458            }
1459            Self::StartError(_) => true,
1460        }
1461    }
1462}
1463
/// The output of a child process during live execution.
///
/// This represents either split stdout/stderr or combined output. The `Option`
/// wrappers distinguish between "not captured" (`None`) and "captured but
/// empty" (`Some` with empty content).
///
/// The `NotLoaded` variant is used during replay when the display
/// configuration indicates that output won't be shown.
///
/// For the recording counterpart, see
/// [`ZipStoreOutputDescription`](crate::record::ZipStoreOutputDescription).
#[derive(Clone, Debug)]
pub enum ChildOutputDescription {
    /// The output was split into stdout and stderr.
    Split {
        /// Standard output, or `None` if not captured.
        stdout: Option<ChildSingleOutput>,
        /// Standard error, or `None` if not captured.
        stderr: Option<ChildSingleOutput>,
    },

    /// The output was combined into a single stream.
    Combined {
        /// The combined output.
        output: ChildSingleOutput,
    },

    /// Output exists but was not loaded.
    ///
    /// This variant is used during replay when the display configuration
    /// indicates that output won't be shown. Code that accesses output
    /// bytes must never be reached with this variant: for example,
    /// [`Self::stdout_stderr_len`] panics (`unreachable!`) on it.
    NotLoaded,
}
1498
1499impl ChildOutputDescription {
1500    /// Returns the lengths of stdout and stderr in bytes.
1501    ///
1502    /// Returns `None` for each stream that wasn't captured.
1503    pub fn stdout_stderr_len(&self) -> (Option<u64>, Option<u64>) {
1504        match self {
1505            Self::Split { stdout, stderr } => (
1506                stdout.as_ref().map(|s| s.buf().len() as u64),
1507                stderr.as_ref().map(|s| s.buf().len() as u64),
1508            ),
1509            Self::Combined { output } => (Some(output.buf().len() as u64), None),
1510            Self::NotLoaded => {
1511                unreachable!(
1512                    "attempted to get output lengths from output that was not loaded \
1513                     (this method is only called from the live runner, where NotLoaded \
1514                     is never produced)"
1515                );
1516            }
1517        }
1518    }
1519}
1520
/// A serializable description of an error that occurred while starting a child process.
///
/// This is the external-facing counterpart to [`ChildStartError`].
/// Serialized as an internally tagged enum via the `kind` field.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum ChildStartErrorDescription {
    /// An error occurred while creating a temporary path for a setup script.
    TempPath {
        /// The source error.
        source: SerializableError,
    },

    /// An error occurred while spawning the child process.
    Spawn {
        /// The source error.
        source: SerializableError,
    },
}
1540
1541impl fmt::Display for ChildStartErrorDescription {
1542    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1543        match self {
1544            Self::TempPath { .. } => {
1545                write!(f, "error creating temporary path for setup script")
1546            }
1547            Self::Spawn { .. } => write!(f, "error spawning child process"),
1548        }
1549    }
1550}
1551
impl std::error::Error for ChildStartErrorDescription {
    // Every variant wraps exactly one `SerializableError`, so the source
    // chain can be walked uniformly.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::TempPath { source } | Self::Spawn { source } => Some(source),
        }
    }
}
1559
/// A serializable description of an error that occurred while managing a child process.
///
/// This is the external-facing counterpart to [`ChildError`].
/// Serialized as an internally tagged enum via the `kind` field.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum ChildErrorDescription {
    /// An error occurred while reading standard output.
    ReadStdout {
        /// The source error.
        source: SerializableError,
    },

    /// An error occurred while reading standard error.
    ReadStderr {
        /// The source error.
        source: SerializableError,
    },

    /// An error occurred while reading combined output.
    ReadCombined {
        /// The source error.
        source: SerializableError,
    },

    /// An error occurred while waiting for the child process to exit.
    Wait {
        /// The source error.
        source: SerializableError,
    },

    /// An error occurred while reading the output of a setup script.
    SetupScriptOutput {
        /// The source error.
        source: SerializableError,
    },
}
1597
1598impl fmt::Display for ChildErrorDescription {
1599    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1600        match self {
1601            Self::ReadStdout { .. } => write!(f, "error reading standard output"),
1602            Self::ReadStderr { .. } => write!(f, "error reading standard error"),
1603            Self::ReadCombined { .. } => {
1604                write!(f, "error reading combined stream")
1605            }
1606            Self::Wait { .. } => {
1607                write!(f, "error waiting for child process to exit")
1608            }
1609            Self::SetupScriptOutput { .. } => {
1610                write!(f, "error reading setup script output")
1611            }
1612        }
1613    }
1614}
1615
impl std::error::Error for ChildErrorDescription {
    // Every variant wraps exactly one `SerializableError`, so the source
    // chain can be walked uniformly.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::ReadStdout { source }
            | Self::ReadStderr { source }
            | Self::ReadCombined { source }
            | Self::Wait { source }
            | Self::SetupScriptOutput { source } => Some(source),
        }
    }
}
1627
/// A serializable representation of an error chain.
///
/// This captures the error message and the chain of source errors from
/// any [`std::error::Error`] implementation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SerializableError {
    // The top-level message (what `Display` renders).
    message: String,
    // The next error in the chain, if any; forms a singly-linked list
    // exposed via `Error::source` and `sources()`.
    source: Option<Box<SerializableError>>,
}
1637
1638impl SerializableError {
1639    /// Creates a new `SerializableError` from an error, walking the
1640    /// full source chain.
1641    pub fn new(error: &dyn std::error::Error) -> Self {
1642        let message = error.to_string();
1643        let mut causes = Vec::new();
1644        let mut source = error.source();
1645        while let Some(err) = source {
1646            causes.push(err.to_string());
1647            source = err.source();
1648        }
1649        Self::from_message_and_causes(message, causes)
1650    }
1651
1652    /// Creates a new `SerializableError` from a message and a list of
1653    /// causes.
1654    pub fn from_message_and_causes(message: String, causes: Vec<String>) -> Self {
1655        // This builds a singly-linked list from the causes. You rarely
1656        // see them in Rust, but they're required to implement
1657        // Error::source.
1658        let mut next = None;
1659        for cause in causes.into_iter().rev() {
1660            let error = Self {
1661                message: cause,
1662                source: next.map(Box::new),
1663            };
1664            next = Some(error);
1665        }
1666        Self {
1667            message,
1668            source: next.map(Box::new),
1669        }
1670    }
1671
1672    /// Returns the message associated with this error.
1673    pub fn message(&self) -> &str {
1674        &self.message
1675    }
1676
1677    /// Returns the causes of this error as an iterator.
1678    pub fn sources(&self) -> SerializableErrorSources<'_> {
1679        SerializableErrorSources {
1680            current: self.source.as_deref(),
1681        }
1682    }
1683}
1684
impl fmt::Display for SerializableError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Display renders only the top-level message; the cause chain is
        // exposed separately through `Error::source` / `sources()`.
        f.write_str(&self.message)
    }
}
1690
impl std::error::Error for SerializableError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Coerce the boxed node to a trait object so callers can walk the
        // chain generically.
        self.source
            .as_deref()
            .map(|s| s as &(dyn std::error::Error + 'static))
    }
}
1698
/// The sources of a [`SerializableError`] as an iterator.
///
/// Produced by [`SerializableError::sources`]; yields each cause in the
/// chain, outermost first.
#[derive(Debug)]
pub struct SerializableErrorSources<'a> {
    // The next error to yield; `None` once the chain is exhausted.
    current: Option<&'a SerializableError>,
}
1704
1705impl<'a> Iterator for SerializableErrorSources<'a> {
1706    type Item = &'a SerializableError;
1707
1708    fn next(&mut self) -> Option<Self::Item> {
1709        let current = self.current?;
1710        self.current = current.source.as_deref();
1711        Some(current)
1712    }
1713}
1714
1715mod serializable_error_serde {
1716    use super::*;
1717
1718    #[derive(Serialize, Deserialize)]
1719    struct Ser {
1720        message: String,
1721        // For backwards compatibility with IoErrorDescription, which
1722        // didn't have a causes field.
1723        #[serde(default)]
1724        causes: Vec<String>,
1725    }
1726
1727    impl Serialize for SerializableError {
1728        fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
1729            let mut causes = Vec::new();
1730            let mut cause = self.source.as_ref();
1731            while let Some(c) = cause {
1732                causes.push(c.message.clone());
1733                cause = c.source.as_ref();
1734            }
1735
1736            let ser = Ser {
1737                message: self.message.clone(),
1738                causes,
1739            };
1740            ser.serialize(serializer)
1741        }
1742    }
1743
1744    impl<'de> Deserialize<'de> for SerializableError {
1745        fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
1746            let ser = Ser::deserialize(deserializer)?;
1747            Ok(SerializableError::from_message_and_causes(
1748                ser.message,
1749                ser.causes,
1750            ))
1751        }
1752    }
1753}
1754
#[cfg(test)]
mod serializable_error_arbitrary {
    use super::*;
    use proptest::prelude::*;

    // Proptest strategy: an arbitrary message plus 0-2 arbitrary cause
    // strings, assembled through the same constructor production code uses.
    impl Arbitrary for SerializableError {
        type Parameters = ();
        type Strategy = BoxedStrategy<Self>;

        fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
            (
                any::<String>(),
                proptest::collection::vec(any::<String>(), 0..3),
            )
                .prop_map(|(message, causes)| {
                    SerializableError::from_message_and_causes(message, causes)
                })
                .boxed()
        }
    }
}
1776
1777impl From<ChildExecutionOutput> for ChildExecutionOutputDescription<LiveSpec> {
1778    fn from(output: ChildExecutionOutput) -> Self {
1779        match output {
1780            ChildExecutionOutput::Output {
1781                result,
1782                output,
1783                errors,
1784            } => Self::Output {
1785                result: result.map(ExecutionResultDescription::from),
1786                output: ChildOutputDescription::from(output),
1787                errors: errors.map(|e| e.map(ChildErrorDescription::from)),
1788            },
1789            ChildExecutionOutput::StartError(error) => {
1790                Self::StartError(ChildStartErrorDescription::from(error))
1791            }
1792        }
1793    }
1794}
1795
1796impl From<ChildOutput> for ChildOutputDescription {
1797    fn from(output: ChildOutput) -> Self {
1798        match output {
1799            ChildOutput::Split(split) => Self::Split {
1800                stdout: split.stdout,
1801                stderr: split.stderr,
1802            },
1803            ChildOutput::Combined { output } => Self::Combined { output },
1804        }
1805    }
1806}
1807
1808impl From<ChildStartError> for ChildStartErrorDescription {
1809    fn from(error: ChildStartError) -> Self {
1810        match error {
1811            ChildStartError::TempPath(e) => Self::TempPath {
1812                source: SerializableError::new(&*e),
1813            },
1814            ChildStartError::Spawn(e) => Self::Spawn {
1815                source: SerializableError::new(&*e),
1816            },
1817        }
1818    }
1819}
1820
1821impl From<ChildError> for ChildErrorDescription {
1822    fn from(error: ChildError) -> Self {
1823        match error {
1824            ChildError::Fd(ChildFdError::ReadStdout(e)) => Self::ReadStdout {
1825                source: SerializableError::new(&*e),
1826            },
1827            ChildError::Fd(ChildFdError::ReadStderr(e)) => Self::ReadStderr {
1828                source: SerializableError::new(&*e),
1829            },
1830            ChildError::Fd(ChildFdError::ReadCombined(e)) => Self::ReadCombined {
1831                source: SerializableError::new(&*e),
1832            },
1833            ChildError::Fd(ChildFdError::Wait(e)) => Self::Wait {
1834                source: SerializableError::new(&*e),
1835            },
1836            ChildError::SetupScriptOutput(e) => Self::SetupScriptOutput {
1837                source: SerializableError::new(&e),
1838            },
1839        }
1840    }
1841}
1842
/// Data related to retries for a test.
///
/// See [`RetryData::is_last_attempt`] for whether another attempt may follow.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: u32,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: u32,
}
1854
1855impl RetryData {
1856    /// Returns true if there are no more attempts after this.
1857    pub fn is_last_attempt(&self) -> bool {
1858        self.attempt >= self.total_attempts
1859    }
1860}
1861
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherited standard IO was created, but it didn't shut down when
    /// the test finished.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true }` and `Leak {
        /// result: Fail }`. In the former case, the test failed and also leaked
        /// handles. In the latter case, the test passed but leaked handles, and
        /// configuration indicated that this is a failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherited standard IO was created, but it didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout {
        /// Whether this timeout was treated as a failure.
        result: SlowTimeoutResult,
    },
}
1897
1898impl ExecutionResult {
1899    /// Returns true if the test was successful.
1900    pub fn is_success(self) -> bool {
1901        match self {
1902            ExecutionResult::Pass
1903            | ExecutionResult::Timeout {
1904                result: SlowTimeoutResult::Pass,
1905            }
1906            | ExecutionResult::Leak {
1907                result: LeakTimeoutResult::Pass,
1908            } => true,
1909            ExecutionResult::Leak {
1910                result: LeakTimeoutResult::Fail,
1911            }
1912            | ExecutionResult::Fail { .. }
1913            | ExecutionResult::ExecFail
1914            | ExecutionResult::Timeout {
1915                result: SlowTimeoutResult::Fail,
1916            } => false,
1917        }
1918    }
1919
1920    /// Returns a static string representation of the result.
1921    pub fn as_static_str(&self) -> &'static str {
1922        match self {
1923            ExecutionResult::Pass => "pass",
1924            ExecutionResult::Leak { .. } => "leak",
1925            ExecutionResult::Fail { .. } => "fail",
1926            ExecutionResult::ExecFail => "exec-fail",
1927            ExecutionResult::Timeout { .. } => "timeout",
1928        }
1929    }
1930}
1931
/// Failure status: either an exit code or an abort status.
///
/// Attached to [`ExecutionResult::Fail`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted (e.g. due to a signal on Unix).
    Abort(AbortStatus),
}
1941
1942impl FailureStatus {
1943    /// Extract the failure status from an `ExitStatus`.
1944    pub fn extract(exit_status: ExitStatus) -> Self {
1945        if let Some(abort_status) = AbortStatus::extract(exit_status) {
1946            FailureStatus::Abort(abort_status)
1947        } else {
1948            FailureStatus::ExitCode(
1949                exit_status
1950                    .code()
1951                    .expect("if abort_status is None, then code must be present"),
1952            )
1953        }
1954    }
1955}
1956
/// A Unix signal or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant. Only the
/// variants for the current platform are compiled in; see
/// [`AbortDescription`] for the platform-independent form.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}
1974
1975impl AbortStatus {
1976    /// Extract the abort status from an [`ExitStatus`].
1977    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
1978        cfg_if::cfg_if! {
1979            if #[cfg(unix)] {
1980                // On Unix, extract the signal if it's found.
1981                use std::os::unix::process::ExitStatusExt;
1982                exit_status.signal().map(AbortStatus::UnixSignal)
1983            } else if #[cfg(windows)] {
1984                exit_status.code().and_then(|code| {
1985                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
1986                })
1987            } else {
1988                None
1989            }
1990        }
1991    }
1992}
1993
1994impl fmt::Debug for AbortStatus {
1995    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1996        match self {
1997            #[cfg(unix)]
1998            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
1999            #[cfg(windows)]
2000            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
2001            #[cfg(windows)]
2002            AbortStatus::JobObject => write!(f, "JobObject"),
2003        }
2004    }
2005}
2006
/// A platform-independent description of an abort status.
///
/// This type can be serialized on one platform and deserialized on another,
/// containing all information needed for display without requiring
/// platform-specific lookups. Unlike [`AbortStatus`], all variants are
/// available on every platform.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
#[non_exhaustive]
pub enum AbortDescription {
    /// The process was aborted by a Unix signal.
    UnixSignal {
        /// The signal number.
        signal: i32,
        /// The signal name without the "SIG" prefix (e.g., "TERM", "SEGV"),
        /// if known.
        #[cfg_attr(
            test,
            strategy(proptest::option::of(crate::reporter::test_helpers::arb_smol_str()))
        )]
        name: Option<SmolStr>,
    },

    /// The process was aborted with a Windows NT status code.
    WindowsNtStatus {
        /// The NTSTATUS code.
        code: i32,
        /// The human-readable message from the Win32 error code, if available.
        #[cfg_attr(
            test,
            strategy(proptest::option::of(crate::reporter::test_helpers::arb_smol_str()))
        )]
        message: Option<SmolStr>,
    },

    /// The process was terminated via a Windows job object.
    WindowsJobObject,
}
2045
2046impl fmt::Display for AbortDescription {
2047    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2048        match self {
2049            Self::UnixSignal { signal, name } => {
2050                write!(f, "aborted with signal {signal}")?;
2051                if let Some(name) = name {
2052                    write!(f, " (SIG{name})")?;
2053                }
2054                Ok(())
2055            }
2056            Self::WindowsNtStatus { code, message } => {
2057                write!(f, "aborted with code {code:#010x}")?;
2058                if let Some(message) = message {
2059                    write!(f, ": {message}")?;
2060                }
2061                Ok(())
2062            }
2063            Self::WindowsJobObject => {
2064                write!(f, "terminated via job object")
2065            }
2066        }
2067    }
2068}
2069
impl From<AbortStatus> for AbortDescription {
    /// Converts a platform-specific [`AbortStatus`] into its
    /// platform-independent description, resolving names/messages eagerly so
    /// the result can be displayed on any platform.
    fn from(status: AbortStatus) -> Self {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                match status {
                    AbortStatus::UnixSignal(signal) => Self::UnixSignal {
                        signal,
                        // Look up the static signal name (if known) at
                        // conversion time so the description is self-contained.
                        name: crate::helpers::signal_str(signal).map(SmolStr::new_static),
                    },
                }
            } else if #[cfg(windows)] {
                match status {
                    AbortStatus::WindowsNtStatus(code) => Self::WindowsNtStatus {
                        code,
                        // Resolve the human-readable message now, so it survives
                        // serialization to other platforms.
                        message: crate::helpers::windows_nt_status_message(code),
                    },
                    AbortStatus::JobObject => Self::WindowsJobObject,
                }
            } else {
                // AbortStatus has no variants on other platforms, so this
                // empty match is exhaustive.
                match status {}
            }
        }
    }
}
2094
/// A platform-independent description of a test failure status.
///
/// This is the platform-independent counterpart to [`FailureStatus`].
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
#[non_exhaustive]
pub enum FailureDescription {
    /// The test exited with a non-zero exit code.
    ExitCode {
        /// The exit code.
        code: i32,
    },

    /// The test was aborted (e.g., by a signal on Unix or NT status on Windows).
    ///
    /// Note: this is a struct variant rather than a newtype variant to ensure
    /// proper JSON nesting. Both `FailureDescription` and `AbortDescription`
    /// use `#[serde(tag = "kind")]`, and if this were a newtype variant, serde
    /// would flatten the inner type causing duplicate `"kind"` fields.
    Abort {
        /// The abort description.
        abort: AbortDescription,
    },
}
2120
2121impl From<FailureStatus> for FailureDescription {
2122    fn from(status: FailureStatus) -> Self {
2123        match status {
2124            FailureStatus::ExitCode(code) => Self::ExitCode { code },
2125            FailureStatus::Abort(abort) => Self::Abort {
2126                abort: AbortDescription::from(abort),
2127            },
2128        }
2129    }
2130}
2131
2132impl fmt::Display for FailureDescription {
2133    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2134        match self {
2135            Self::ExitCode { code } => write!(f, "exited with code {code}"),
2136            Self::Abort { abort } => write!(f, "{abort}"),
2137        }
2138    }
2139}
2140
/// A platform-independent description of a test execution result.
///
/// This is the platform-independent counterpart to [`ExecutionResult`], used
/// in external-facing types like [`ExecuteStatus`]. The variants mirror
/// [`ExecutionResult`] one-to-one, with `Fail` carrying a
/// [`FailureDescription`] instead of a platform-specific status.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[serde(tag = "status", rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
#[non_exhaustive]
pub enum ExecutionResultDescription {
    /// The test passed.
    Pass,

    /// The test passed but leaked handles.
    Leak {
        /// Whether this leak was treated as a failure.
        result: LeakTimeoutResult,
    },

    /// The test failed.
    Fail {
        /// The failure status.
        failure: FailureDescription,

        /// Whether the test leaked handles.
        leaked: bool,
    },

    /// An error occurred while executing the test.
    ExecFail,

    /// The test was terminated due to a timeout.
    Timeout {
        /// Whether this timeout was treated as a failure.
        result: SlowTimeoutResult,
    },
}
2177
2178impl ExecutionResultDescription {
2179    /// Returns true if the test was successful.
2180    pub fn is_success(&self) -> bool {
2181        match self {
2182            Self::Pass
2183            | Self::Timeout {
2184                result: SlowTimeoutResult::Pass,
2185            }
2186            | Self::Leak {
2187                result: LeakTimeoutResult::Pass,
2188            } => true,
2189            Self::Leak {
2190                result: LeakTimeoutResult::Fail,
2191            }
2192            | Self::Fail { .. }
2193            | Self::ExecFail
2194            | Self::Timeout {
2195                result: SlowTimeoutResult::Fail,
2196            } => false,
2197        }
2198    }
2199
2200    /// Returns a static string representation of the result.
2201    pub fn as_static_str(&self) -> &'static str {
2202        match self {
2203            Self::Pass => "pass",
2204            Self::Leak { .. } => "leak",
2205            Self::Fail { .. } => "fail",
2206            Self::ExecFail => "exec-fail",
2207            Self::Timeout { .. } => "timeout",
2208        }
2209    }
2210
2211    /// Returns true if this result represents a test that was terminated by nextest
2212    /// (as opposed to failing naturally).
2213    ///
2214    /// This is used to suppress output spam when running under
2215    /// TestFailureImmediate.
2216    ///
2217    /// TODO: This is a heuristic that checks if the test was terminated by
2218    /// SIGTERM (Unix) or job object (Windows). In an edge case, a test could
2219    /// send SIGTERM to itself, which would incorrectly be detected as a
2220    /// nextest-initiated termination. A more robust solution would track which
2221    /// tests were explicitly sent termination signals by nextest.
2222    pub fn is_termination_failure(&self) -> bool {
2223        matches!(
2224            self,
2225            Self::Fail {
2226                failure: FailureDescription::Abort {
2227                    abort: AbortDescription::UnixSignal {
2228                        signal: SIGTERM,
2229                        ..
2230                    },
2231                },
2232                ..
2233            } | Self::Fail {
2234                failure: FailureDescription::Abort {
2235                    abort: AbortDescription::WindowsJobObject,
2236                },
2237                ..
2238            }
2239        )
2240    }
2241}
2242
2243impl From<ExecutionResult> for ExecutionResultDescription {
2244    fn from(result: ExecutionResult) -> Self {
2245        match result {
2246            ExecutionResult::Pass => Self::Pass,
2247            ExecutionResult::Leak { result } => Self::Leak { result },
2248            ExecutionResult::Fail {
2249                failure_status,
2250                leaked,
2251            } => Self::Fail {
2252                failure: FailureDescription::from(failure_status),
2253                leaked,
2254            },
2255            ExecutionResult::ExecFail => Self::ExecFail,
2256            ExecutionResult::Timeout { result } => Self::Timeout { result },
2257        }
2258    }
2259}
2260
// Note: the order here matters -- it indicates severity of cancellation, and
// the derived `Ord`/`PartialOrd` rely on the variant declaration order.
/// The reason why a test run is being cancelled.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A test failed and fail-fast with immediate termination was specified.
    TestFailureImmediate,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
2291
2292impl CancelReason {
2293    pub(crate) fn to_static_str(self) -> &'static str {
2294        match self {
2295            CancelReason::SetupScriptFailure => "setup script failure",
2296            CancelReason::TestFailure => "test failure",
2297            CancelReason::ReportError => "reporting error",
2298            CancelReason::GlobalTimeout => "global timeout",
2299            CancelReason::TestFailureImmediate => "test failure",
2300            CancelReason::Signal => "signal",
2301            CancelReason::Interrupt => "interrupt",
2302            CancelReason::SecondSignal => "second signal",
2303        }
2304    }
2305}
/// The kind of unit of work that nextest is executing.
///
/// Used to pick the right status messages (see the constants on the impl).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}
2315
2316impl UnitKind {
2317    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
2318    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";
2319
2320    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
2321    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";
2322
2323    pub(crate) fn waiting_on_message(&self) -> &'static str {
2324        match self {
2325            UnitKind::Test => Self::WAITING_ON_TEST_MESSAGE,
2326            UnitKind::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
2327        }
2328    }
2329
2330    pub(crate) fn executing_message(&self) -> &'static str {
2331        match self {
2332            UnitKind::Test => Self::EXECUTING_TEST_MESSAGE,
2333            UnitKind::Script => Self::EXECUTING_SCRIPT_MESSAGE,
2334        }
2335    }
2336}
2337
/// A response to an information request.
///
/// One variant per unit kind; see [`UnitKind`].
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
2347
/// A setup script's response to an information request.
///
/// Carried by [`InfoResponse::SetupScript`].
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse {
    /// The stress index of the setup script.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: Vec<String>,

    /// The state of the setup script.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutputDescription<LiveSpec>,
}
2369
/// A test's response to an information request.
///
/// Carried by [`InfoResponse::Test`].
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutputDescription<LiveSpec>,
}
2388
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests. See [`UnitState::has_valid_output`]
/// for which states carry meaningful output.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResultDescription>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResultDescription,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResultDescription,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
2466
2467impl UnitState {
2468    /// Returns true if the state has a valid output attached to it.
2469    pub fn has_valid_output(&self) -> bool {
2470        match self {
2471            UnitState::Running { .. }
2472            | UnitState::Exiting { .. }
2473            | UnitState::Terminating(_)
2474            | UnitState::Exited { .. } => true,
2475            UnitState::DelayBeforeNextAttempt { .. } => false,
2476        }
2477    }
2478}
2479
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
2503
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests; displayed via its `Display` impl.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}
2518
2519impl fmt::Display for UnitTerminateReason {
2520    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2521        match self {
2522            UnitTerminateReason::Timeout => write!(f, "timeout"),
2523            UnitTerminateReason::Signal => write!(f, "signal"),
2524            UnitTerminateReason::Interrupt => write!(f, "interrupt"),
2525        }
2526    }
2527}
2528
/// The way in which a script or test is being forcibly terminated by nextest.
///
/// Only the variants for the current platform are compiled in.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing.
    #[cfg(test)]
    Fake,
}
2554
#[cfg(unix)]
/// The signal that is or was sent to terminate a script or test.
///
/// Unix-only; part of [`UnitTerminateMethod::Signal`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}
2574
2575#[cfg(unix)]
2576impl fmt::Display for UnitTerminateSignal {
2577    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2578        match self {
2579            UnitTerminateSignal::Interrupt => write!(f, "SIGINT"),
2580            UnitTerminateSignal::Term => write!(f, "SIGTERM"),
2581            UnitTerminateSignal::Hangup => write!(f, "SIGHUP"),
2582            UnitTerminateSignal::Quit => write!(f, "SIGQUIT"),
2583            UnitTerminateSignal::Kill => write!(f, "SIGKILL"),
2584        }
2585    }
2586}
2587
2588#[cfg(test)]
2589mod tests {
2590    use super::*;
2591
2592    #[test]
2593    fn test_is_success() {
2594        assert_eq!(
2595            RunStats::default().summarize_final(),
2596            FinalRunStats::NoTestsRun,
2597            "empty run => no tests run"
2598        );
2599        assert_eq!(
2600            RunStats {
2601                initial_run_count: 42,
2602                finished_count: 42,
2603                ..RunStats::default()
2604            }
2605            .summarize_final(),
2606            FinalRunStats::Success,
2607            "initial run count = final run count => success"
2608        );
2609        assert_eq!(
2610            RunStats {
2611                initial_run_count: 42,
2612                finished_count: 41,
2613                ..RunStats::default()
2614            }
2615            .summarize_final(),
2616            FinalRunStats::Cancelled {
2617                reason: None,
2618                kind: RunStatsFailureKind::Test {
2619                    initial_run_count: 42,
2620                    not_run: 1
2621                }
2622            },
2623            "initial run count > final run count => cancelled"
2624        );
2625        assert_eq!(
2626            RunStats {
2627                initial_run_count: 42,
2628                finished_count: 42,
2629                failed: 1,
2630                ..RunStats::default()
2631            }
2632            .summarize_final(),
2633            FinalRunStats::Failed {
2634                kind: RunStatsFailureKind::Test {
2635                    initial_run_count: 42,
2636                    not_run: 0,
2637                },
2638            },
2639            "failed => failure"
2640        );
2641        assert_eq!(
2642            RunStats {
2643                initial_run_count: 42,
2644                finished_count: 42,
2645                exec_failed: 1,
2646                ..RunStats::default()
2647            }
2648            .summarize_final(),
2649            FinalRunStats::Failed {
2650                kind: RunStatsFailureKind::Test {
2651                    initial_run_count: 42,
2652                    not_run: 0,
2653                },
2654            },
2655            "exec failed => failure"
2656        );
2657        assert_eq!(
2658            RunStats {
2659                initial_run_count: 42,
2660                finished_count: 42,
2661                failed_timed_out: 1,
2662                ..RunStats::default()
2663            }
2664            .summarize_final(),
2665            FinalRunStats::Failed {
2666                kind: RunStatsFailureKind::Test {
2667                    initial_run_count: 42,
2668                    not_run: 0,
2669                },
2670            },
2671            "timed out => failure {:?} {:?}",
2672            RunStats {
2673                initial_run_count: 42,
2674                finished_count: 42,
2675                failed_timed_out: 1,
2676                ..RunStats::default()
2677            }
2678            .summarize_final(),
2679            FinalRunStats::Failed {
2680                kind: RunStatsFailureKind::Test {
2681                    initial_run_count: 42,
2682                    not_run: 0,
2683                },
2684            },
2685        );
2686        assert_eq!(
2687            RunStats {
2688                initial_run_count: 42,
2689                finished_count: 42,
2690                skipped: 1,
2691                ..RunStats::default()
2692            }
2693            .summarize_final(),
2694            FinalRunStats::Success,
2695            "skipped => not considered a failure"
2696        );
2697
2698        assert_eq!(
2699            RunStats {
2700                setup_scripts_initial_count: 2,
2701                setup_scripts_finished_count: 1,
2702                ..RunStats::default()
2703            }
2704            .summarize_final(),
2705            FinalRunStats::Cancelled {
2706                reason: None,
2707                kind: RunStatsFailureKind::SetupScript,
2708            },
2709            "setup script failed => failure"
2710        );
2711
2712        assert_eq!(
2713            RunStats {
2714                setup_scripts_initial_count: 2,
2715                setup_scripts_finished_count: 2,
2716                setup_scripts_failed: 1,
2717                ..RunStats::default()
2718            }
2719            .summarize_final(),
2720            FinalRunStats::Failed {
2721                kind: RunStatsFailureKind::SetupScript,
2722            },
2723            "setup script failed => failure"
2724        );
2725        assert_eq!(
2726            RunStats {
2727                setup_scripts_initial_count: 2,
2728                setup_scripts_finished_count: 2,
2729                setup_scripts_exec_failed: 1,
2730                ..RunStats::default()
2731            }
2732            .summarize_final(),
2733            FinalRunStats::Failed {
2734                kind: RunStatsFailureKind::SetupScript,
2735            },
2736            "setup script exec failed => failure"
2737        );
2738        assert_eq!(
2739            RunStats {
2740                setup_scripts_initial_count: 2,
2741                setup_scripts_finished_count: 2,
2742                setup_scripts_timed_out: 1,
2743                ..RunStats::default()
2744            }
2745            .summarize_final(),
2746            FinalRunStats::Failed {
2747                kind: RunStatsFailureKind::SetupScript,
2748            },
2749            "setup script timed out => failure"
2750        );
2751        assert_eq!(
2752            RunStats {
2753                setup_scripts_initial_count: 2,
2754                setup_scripts_finished_count: 2,
2755                setup_scripts_passed: 2,
2756                ..RunStats::default()
2757            }
2758            .summarize_final(),
2759            FinalRunStats::NoTestsRun,
2760            "setup scripts passed => success, but no tests run"
2761        );
2762
2763        // Flaky tests with flaky-result = "fail" are included in `failed`, so this
2764        // is covered by the general failure tests above.
2765    }
2766
2767    /// Helper to build a minimal `ExecuteStatus<LiveSpec>` for tests.
2768    fn make_execute_status(
2769        result: ExecutionResultDescription,
2770        attempt: u32,
2771        total_attempts: u32,
2772    ) -> ExecuteStatus<LiveSpec> {
2773        make_execute_status_slow(result, attempt, total_attempts, false)
2774    }
2775
2776    /// Helper to build a minimal `ExecuteStatus<LiveSpec>` for tests, with
2777    /// the `is_slow` flag set.
2778    fn make_execute_status_slow(
2779        result: ExecutionResultDescription,
2780        attempt: u32,
2781        total_attempts: u32,
2782        is_slow: bool,
2783    ) -> ExecuteStatus<LiveSpec> {
2784        ExecuteStatus {
2785            retry_data: RetryData {
2786                attempt,
2787                total_attempts,
2788            },
2789            output: ChildExecutionOutputDescription::Output {
2790                result: Some(result.clone()),
2791                output: ChildOutputDescription::Split {
2792                    stdout: None,
2793                    stderr: None,
2794                },
2795                errors: None,
2796            },
2797            result,
2798            start_time: chrono::Utc::now().into(),
2799            time_taken: Duration::from_millis(100),
2800            is_slow,
2801            delay_before_start: Duration::ZERO,
2802            error_summary: None,
2803            output_error_slice: None,
2804        }
2805    }
2806
2807    #[test]
2808    fn is_success_for_output_by_variant() {
2809        // Success: single passing run → true.
2810        let pass_status = make_execute_status(ExecutionResultDescription::Pass, 1, 1);
2811        let success_statuses = ExecutionStatuses::new(vec![pass_status], FlakyResult::Pass);
2812        let describe = success_statuses.describe();
2813        assert!(
2814            matches!(describe, ExecutionDescription::Success { .. }),
2815            "single pass is Success"
2816        );
2817        assert!(
2818            describe.is_success_for_output(),
2819            "Success: output is success output"
2820        );
2821
2822        // Flaky pass: fail then pass → true.
2823        let fail_status = make_execute_status(
2824            ExecutionResultDescription::Fail {
2825                failure: FailureDescription::ExitCode { code: 1 },
2826                leaked: false,
2827            },
2828            1,
2829            2,
2830        );
2831        let pass_status = make_execute_status(ExecutionResultDescription::Pass, 2, 2);
2832        let flaky_pass_statuses =
2833            ExecutionStatuses::new(vec![fail_status, pass_status], FlakyResult::Pass);
2834        let describe = flaky_pass_statuses.describe();
2835        assert!(
2836            matches!(
2837                describe,
2838                ExecutionDescription::Flaky {
2839                    result: FlakyResult::Pass,
2840                    ..
2841                }
2842            ),
2843            "fail then pass with FlakyResult::Pass is Flaky Pass"
2844        );
2845        assert!(
2846            describe.is_success_for_output(),
2847            "Flaky pass: output is success output"
2848        );
2849
2850        // Flaky fail: fail then pass with result=fail → true.
2851        let fail_status = make_execute_status(
2852            ExecutionResultDescription::Fail {
2853                failure: FailureDescription::ExitCode { code: 1 },
2854                leaked: false,
2855            },
2856            1,
2857            2,
2858        );
2859        let pass_status = make_execute_status(ExecutionResultDescription::Pass, 2, 2);
2860        let flaky_fail_statuses =
2861            ExecutionStatuses::new(vec![fail_status, pass_status], FlakyResult::Fail);
2862        let describe = flaky_fail_statuses.describe();
2863        assert!(
2864            matches!(
2865                describe,
2866                ExecutionDescription::Flaky {
2867                    result: FlakyResult::Fail,
2868                    ..
2869                }
2870            ),
2871            "fail then pass with FlakyResult::Fail is Flaky Fail"
2872        );
2873        assert!(
2874            describe.is_success_for_output(),
2875            "Flaky fail: output is still success output (last attempt passed)"
2876        );
2877
2878        // Failure: single fail → false.
2879        let fail_status = make_execute_status(
2880            ExecutionResultDescription::Fail {
2881                failure: FailureDescription::ExitCode { code: 1 },
2882                leaked: false,
2883            },
2884            1,
2885            1,
2886        );
2887        let failure_statuses = ExecutionStatuses::new(vec![fail_status], FlakyResult::Pass);
2888        let describe = failure_statuses.describe();
2889        assert!(
2890            matches!(describe, ExecutionDescription::Failure { .. }),
2891            "single fail is Failure"
2892        );
2893        assert!(
2894            !describe.is_success_for_output(),
2895            "Failure: output is not success output"
2896        );
2897
2898        // Failure with retries: all fail → false.
2899        let fail1 = make_execute_status(
2900            ExecutionResultDescription::Fail {
2901                failure: FailureDescription::ExitCode { code: 1 },
2902                leaked: false,
2903            },
2904            1,
2905            2,
2906        );
2907        let fail2 = make_execute_status(
2908            ExecutionResultDescription::Fail {
2909                failure: FailureDescription::ExitCode { code: 1 },
2910                leaked: false,
2911            },
2912            2,
2913            2,
2914        );
2915        let failure_retry_statuses = ExecutionStatuses::new(vec![fail1, fail2], FlakyResult::Pass);
2916        let describe = failure_retry_statuses.describe();
2917        assert!(
2918            matches!(describe, ExecutionDescription::Failure { .. }),
2919            "all-fail with retries is Failure"
2920        );
2921        assert!(
2922            !describe.is_success_for_output(),
2923            "Failure with retries: output is not success output"
2924        );
2925    }
2926
2927    #[test]
2928    fn abort_description_serialization() {
2929        // Unix signal with name.
2930        let unix_with_name = AbortDescription::UnixSignal {
2931            signal: 15,
2932            name: Some("TERM".into()),
2933        };
2934        let json = serde_json::to_string_pretty(&unix_with_name).unwrap();
2935        insta::assert_snapshot!("abort_unix_signal_with_name", json);
2936        let roundtrip: AbortDescription = serde_json::from_str(&json).unwrap();
2937        assert_eq!(unix_with_name, roundtrip);
2938
2939        // Unix signal without name.
2940        let unix_no_name = AbortDescription::UnixSignal {
2941            signal: 42,
2942            name: None,
2943        };
2944        let json = serde_json::to_string_pretty(&unix_no_name).unwrap();
2945        insta::assert_snapshot!("abort_unix_signal_no_name", json);
2946        let roundtrip: AbortDescription = serde_json::from_str(&json).unwrap();
2947        assert_eq!(unix_no_name, roundtrip);
2948
2949        // Windows NT status (0xC000013A is STATUS_CONTROL_C_EXIT).
2950        let windows_nt = AbortDescription::WindowsNtStatus {
2951            code: -1073741510_i32,
2952            message: Some("The application terminated as a result of a CTRL+C.".into()),
2953        };
2954        let json = serde_json::to_string_pretty(&windows_nt).unwrap();
2955        insta::assert_snapshot!("abort_windows_nt_status", json);
2956        let roundtrip: AbortDescription = serde_json::from_str(&json).unwrap();
2957        assert_eq!(windows_nt, roundtrip);
2958
2959        // Windows NT status without message.
2960        let windows_nt_no_msg = AbortDescription::WindowsNtStatus {
2961            code: -1073741819_i32,
2962            message: None,
2963        };
2964        let json = serde_json::to_string_pretty(&windows_nt_no_msg).unwrap();
2965        insta::assert_snapshot!("abort_windows_nt_status_no_message", json);
2966        let roundtrip: AbortDescription = serde_json::from_str(&json).unwrap();
2967        assert_eq!(windows_nt_no_msg, roundtrip);
2968
2969        // Windows job object.
2970        let job = AbortDescription::WindowsJobObject;
2971        let json = serde_json::to_string_pretty(&job).unwrap();
2972        insta::assert_snapshot!("abort_windows_job_object", json);
2973        let roundtrip: AbortDescription = serde_json::from_str(&json).unwrap();
2974        assert_eq!(job, roundtrip);
2975    }
2976
2977    #[test]
2978    fn abort_description_cross_platform_deserialization() {
2979        // Cross-platform deserialization: these JSON strings could come from any
2980        // platform. Verify they deserialize correctly regardless of current platform.
2981        let unix_json = r#"{"kind":"unix-signal","signal":11,"name":"SEGV"}"#;
2982        let unix_desc: AbortDescription = serde_json::from_str(unix_json).unwrap();
2983        assert_eq!(
2984            unix_desc,
2985            AbortDescription::UnixSignal {
2986                signal: 11,
2987                name: Some("SEGV".into()),
2988            }
2989        );
2990
2991        let windows_json = r#"{"kind":"windows-nt-status","code":-1073741510,"message":"CTRL+C"}"#;
2992        let windows_desc: AbortDescription = serde_json::from_str(windows_json).unwrap();
2993        assert_eq!(
2994            windows_desc,
2995            AbortDescription::WindowsNtStatus {
2996                code: -1073741510,
2997                message: Some("CTRL+C".into()),
2998            }
2999        );
3000
3001        let job_json = r#"{"kind":"windows-job-object"}"#;
3002        let job_desc: AbortDescription = serde_json::from_str(job_json).unwrap();
3003        assert_eq!(job_desc, AbortDescription::WindowsJobObject);
3004    }
3005
3006    #[test]
3007    fn abort_description_display() {
3008        // Unix signal with name.
3009        let unix = AbortDescription::UnixSignal {
3010            signal: 15,
3011            name: Some("TERM".into()),
3012        };
3013        assert_eq!(unix.to_string(), "aborted with signal 15 (SIGTERM)");
3014
3015        // Unix signal without a name.
3016        let unix_no_name = AbortDescription::UnixSignal {
3017            signal: 42,
3018            name: None,
3019        };
3020        assert_eq!(unix_no_name.to_string(), "aborted with signal 42");
3021
3022        // Windows NT status with message.
3023        let windows = AbortDescription::WindowsNtStatus {
3024            code: -1073741510,
3025            message: Some("CTRL+C exit".into()),
3026        };
3027        assert_eq!(
3028            windows.to_string(),
3029            "aborted with code 0xc000013a: CTRL+C exit"
3030        );
3031
3032        // Windows NT status without message.
3033        let windows_no_msg = AbortDescription::WindowsNtStatus {
3034            code: -1073741510,
3035            message: None,
3036        };
3037        assert_eq!(windows_no_msg.to_string(), "aborted with code 0xc000013a");
3038
3039        // Windows job object.
3040        let job = AbortDescription::WindowsJobObject;
3041        assert_eq!(job.to_string(), "terminated via job object");
3042    }
3043
3044    #[cfg(unix)]
3045    #[test]
3046    fn abort_description_from_abort_status() {
3047        // Test conversion from AbortStatus to AbortDescription on Unix.
3048        let status = AbortStatus::UnixSignal(15);
3049        let description = AbortDescription::from(status);
3050
3051        assert_eq!(
3052            description,
3053            AbortDescription::UnixSignal {
3054                signal: 15,
3055                name: Some("TERM".into()),
3056            }
3057        );
3058
3059        // Unknown signal.
3060        let unknown_status = AbortStatus::UnixSignal(42);
3061        let unknown_description = AbortDescription::from(unknown_status);
3062        assert_eq!(
3063            unknown_description,
3064            AbortDescription::UnixSignal {
3065                signal: 42,
3066                name: None,
3067            }
3068        );
3069    }
3070
3071    #[test]
3072    fn execution_result_description_serialization() {
3073        // Test all variants of ExecutionResultDescription for serialization roundtrips.
3074
3075        // Pass.
3076        let pass = ExecutionResultDescription::Pass;
3077        let json = serde_json::to_string_pretty(&pass).unwrap();
3078        insta::assert_snapshot!("pass", json);
3079        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3080        assert_eq!(pass, roundtrip);
3081
3082        // Leak with pass result.
3083        let leak_pass = ExecutionResultDescription::Leak {
3084            result: LeakTimeoutResult::Pass,
3085        };
3086        let json = serde_json::to_string_pretty(&leak_pass).unwrap();
3087        insta::assert_snapshot!("leak_pass", json);
3088        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3089        assert_eq!(leak_pass, roundtrip);
3090
3091        // Leak with fail result.
3092        let leak_fail = ExecutionResultDescription::Leak {
3093            result: LeakTimeoutResult::Fail,
3094        };
3095        let json = serde_json::to_string_pretty(&leak_fail).unwrap();
3096        insta::assert_snapshot!("leak_fail", json);
3097        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3098        assert_eq!(leak_fail, roundtrip);
3099
3100        // Fail with exit code, no leak.
3101        let fail_exit_code = ExecutionResultDescription::Fail {
3102            failure: FailureDescription::ExitCode { code: 101 },
3103            leaked: false,
3104        };
3105        let json = serde_json::to_string_pretty(&fail_exit_code).unwrap();
3106        insta::assert_snapshot!("fail_exit_code", json);
3107        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3108        assert_eq!(fail_exit_code, roundtrip);
3109
3110        // Fail with exit code and leak.
3111        let fail_exit_code_leaked = ExecutionResultDescription::Fail {
3112            failure: FailureDescription::ExitCode { code: 1 },
3113            leaked: true,
3114        };
3115        let json = serde_json::to_string_pretty(&fail_exit_code_leaked).unwrap();
3116        insta::assert_snapshot!("fail_exit_code_leaked", json);
3117        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3118        assert_eq!(fail_exit_code_leaked, roundtrip);
3119
3120        // Fail with Unix signal abort.
3121        let fail_unix_signal = ExecutionResultDescription::Fail {
3122            failure: FailureDescription::Abort {
3123                abort: AbortDescription::UnixSignal {
3124                    signal: 11,
3125                    name: Some("SEGV".into()),
3126                },
3127            },
3128            leaked: false,
3129        };
3130        let json = serde_json::to_string_pretty(&fail_unix_signal).unwrap();
3131        insta::assert_snapshot!("fail_unix_signal", json);
3132        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3133        assert_eq!(fail_unix_signal, roundtrip);
3134
3135        // Fail with Unix signal abort (no name) and leak.
3136        let fail_unix_signal_unknown = ExecutionResultDescription::Fail {
3137            failure: FailureDescription::Abort {
3138                abort: AbortDescription::UnixSignal {
3139                    signal: 42,
3140                    name: None,
3141                },
3142            },
3143            leaked: true,
3144        };
3145        let json = serde_json::to_string_pretty(&fail_unix_signal_unknown).unwrap();
3146        insta::assert_snapshot!("fail_unix_signal_unknown_leaked", json);
3147        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3148        assert_eq!(fail_unix_signal_unknown, roundtrip);
3149
3150        // Fail with Windows NT status abort.
3151        let fail_windows_nt = ExecutionResultDescription::Fail {
3152            failure: FailureDescription::Abort {
3153                abort: AbortDescription::WindowsNtStatus {
3154                    code: -1073741510,
3155                    message: Some("The application terminated as a result of a CTRL+C.".into()),
3156                },
3157            },
3158            leaked: false,
3159        };
3160        let json = serde_json::to_string_pretty(&fail_windows_nt).unwrap();
3161        insta::assert_snapshot!("fail_windows_nt_status", json);
3162        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3163        assert_eq!(fail_windows_nt, roundtrip);
3164
3165        // Fail with Windows NT status abort (no message).
3166        let fail_windows_nt_no_msg = ExecutionResultDescription::Fail {
3167            failure: FailureDescription::Abort {
3168                abort: AbortDescription::WindowsNtStatus {
3169                    code: -1073741819,
3170                    message: None,
3171                },
3172            },
3173            leaked: false,
3174        };
3175        let json = serde_json::to_string_pretty(&fail_windows_nt_no_msg).unwrap();
3176        insta::assert_snapshot!("fail_windows_nt_status_no_message", json);
3177        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3178        assert_eq!(fail_windows_nt_no_msg, roundtrip);
3179
3180        // Fail with Windows job object abort.
3181        let fail_job_object = ExecutionResultDescription::Fail {
3182            failure: FailureDescription::Abort {
3183                abort: AbortDescription::WindowsJobObject,
3184            },
3185            leaked: false,
3186        };
3187        let json = serde_json::to_string_pretty(&fail_job_object).unwrap();
3188        insta::assert_snapshot!("fail_windows_job_object", json);
3189        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3190        assert_eq!(fail_job_object, roundtrip);
3191
3192        // ExecFail.
3193        let exec_fail = ExecutionResultDescription::ExecFail;
3194        let json = serde_json::to_string_pretty(&exec_fail).unwrap();
3195        insta::assert_snapshot!("exec_fail", json);
3196        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3197        assert_eq!(exec_fail, roundtrip);
3198
3199        // Timeout with pass result.
3200        let timeout_pass = ExecutionResultDescription::Timeout {
3201            result: SlowTimeoutResult::Pass,
3202        };
3203        let json = serde_json::to_string_pretty(&timeout_pass).unwrap();
3204        insta::assert_snapshot!("timeout_pass", json);
3205        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3206        assert_eq!(timeout_pass, roundtrip);
3207
3208        // Timeout with fail result.
3209        let timeout_fail = ExecutionResultDescription::Timeout {
3210            result: SlowTimeoutResult::Fail,
3211        };
3212        let json = serde_json::to_string_pretty(&timeout_fail).unwrap();
3213        insta::assert_snapshot!("timeout_fail", json);
3214        let roundtrip: ExecutionResultDescription = serde_json::from_str(&json).unwrap();
3215        assert_eq!(timeout_fail, roundtrip);
3216    }
3217
3218    // --- on_test_finished tests ---
3219
3220    /// Helper to create a fail-then-pass `ExecutionStatuses` for flaky
3221    /// test scenarios.
3222    fn make_flaky_statuses(
3223        pass_result: ExecutionResultDescription,
3224        flaky_result: FlakyResult,
3225        is_slow: bool,
3226    ) -> ExecutionStatuses<LiveSpec> {
3227        let fail = make_execute_status(
3228            ExecutionResultDescription::Fail {
3229                failure: FailureDescription::ExitCode { code: 1 },
3230                leaked: false,
3231            },
3232            1,
3233            2,
3234        );
3235        let pass = make_execute_status_slow(pass_result, 2, 2, is_slow);
3236        ExecutionStatuses::new(vec![fail, pass], flaky_result)
3237    }
3238
3239    /// Helper to run `on_test_finished` on a fresh `RunStats` and return it.
3240    fn run_on_test_finished(statuses: &ExecutionStatuses<LiveSpec>) -> RunStats {
3241        let mut stats = RunStats {
3242            initial_run_count: 1,
3243            ..RunStats::default()
3244        };
3245        stats.on_test_finished(statuses);
3246        stats
3247    }
3248
3249    #[test]
3250    fn on_test_finished_pass_flaky() {
3251        // FlakyResult::Fail (not slow): counts as failed.
3252        let stats = run_on_test_finished(&make_flaky_statuses(
3253            ExecutionResultDescription::Pass,
3254            FlakyResult::Fail,
3255            false,
3256        ));
3257        assert_eq!(stats.finished_count, 1);
3258        assert_eq!(stats.failed, 1);
3259        assert_eq!(stats.failed_slow, 0, "not slow");
3260        assert_eq!(stats.passed, 0);
3261        assert_eq!(stats.flaky, 0);
3262
3263        // FlakyResult::Fail (slow): counts as failed and failed_slow.
3264        let stats = run_on_test_finished(&make_flaky_statuses(
3265            ExecutionResultDescription::Pass,
3266            FlakyResult::Fail,
3267            true,
3268        ));
3269        assert_eq!(stats.failed, 1);
3270        assert_eq!(stats.failed_slow, 1);
3271        assert_eq!(stats.passed, 0);
3272        assert_eq!(stats.flaky, 0);
3273
3274        // FlakyResult::Pass: counts as passed and flaky.
3275        let stats = run_on_test_finished(&make_flaky_statuses(
3276            ExecutionResultDescription::Pass,
3277            FlakyResult::Pass,
3278            true,
3279        ));
3280        assert_eq!(stats.passed, 1);
3281        assert_eq!(stats.passed_slow, 1);
3282        assert_eq!(stats.flaky, 1);
3283        assert_eq!(stats.failed, 0);
3284    }
3285
3286    #[test]
3287    fn on_test_finished_leak_pass_flaky() {
3288        // FlakyResult::Fail (not slow): counts as failed and leaky.
3289        let stats = run_on_test_finished(&make_flaky_statuses(
3290            ExecutionResultDescription::Leak {
3291                result: LeakTimeoutResult::Pass,
3292            },
3293            FlakyResult::Fail,
3294            false,
3295        ));
3296        assert_eq!(stats.failed, 1);
3297        assert_eq!(stats.failed_slow, 0, "not slow");
3298        assert_eq!(stats.leaky, 1, "leak still tracked");
3299        assert_eq!(stats.passed, 0);
3300        assert_eq!(stats.flaky, 0);
3301
3302        // FlakyResult::Fail (slow): also tracks failed_slow.
3303        let stats = run_on_test_finished(&make_flaky_statuses(
3304            ExecutionResultDescription::Leak {
3305                result: LeakTimeoutResult::Pass,
3306            },
3307            FlakyResult::Fail,
3308            true,
3309        ));
3310        assert_eq!(stats.failed, 1);
3311        assert_eq!(stats.failed_slow, 1);
3312        assert_eq!(stats.leaky, 1);
3313        assert_eq!(stats.passed, 0);
3314        assert_eq!(stats.flaky, 0);
3315
3316        // FlakyResult::Pass: counts as passed, leaky, and flaky.
3317        let stats = run_on_test_finished(&make_flaky_statuses(
3318            ExecutionResultDescription::Leak {
3319                result: LeakTimeoutResult::Pass,
3320            },
3321            FlakyResult::Pass,
3322            true,
3323        ));
3324        assert_eq!(stats.passed, 1);
3325        assert_eq!(stats.passed_slow, 1);
3326        assert_eq!(stats.leaky, 1);
3327        assert_eq!(stats.flaky, 1);
3328        assert_eq!(stats.failed, 0);
3329    }
3330
3331    #[test]
3332    fn on_test_finished_timeout_pass_flaky() {
3333        // FlakyResult::Fail (slow): counts as failed and failed_slow,
3334        // not passed_timed_out.
3335        let stats = run_on_test_finished(&make_flaky_statuses(
3336            ExecutionResultDescription::Timeout {
3337                result: SlowTimeoutResult::Pass,
3338            },
3339            FlakyResult::Fail,
3340            true,
3341        ));
3342        assert_eq!(stats.failed, 1);
3343        assert_eq!(stats.failed_slow, 1);
3344        assert_eq!(stats.passed, 0);
3345        assert_eq!(stats.passed_timed_out, 0);
3346        assert_eq!(stats.flaky, 0);
3347
3348        // FlakyResult::Pass: counts as passed, passed_timed_out, and flaky.
3349        let stats = run_on_test_finished(&make_flaky_statuses(
3350            ExecutionResultDescription::Timeout {
3351                result: SlowTimeoutResult::Pass,
3352            },
3353            FlakyResult::Pass,
3354            false,
3355        ));
3356        assert_eq!(stats.passed, 1);
3357        assert_eq!(stats.passed_timed_out, 1);
3358        assert_eq!(stats.flaky, 1);
3359        assert_eq!(stats.failed, 0);
3360    }
3361
3362    #[test]
3363    fn on_test_finished_non_flaky() {
3364        // Single-attempt pass (slow): counts as passed, not flaky.
3365        let pass = make_execute_status_slow(ExecutionResultDescription::Pass, 1, 1, true);
3366        let stats = run_on_test_finished(&ExecutionStatuses::new(vec![pass], FlakyResult::Pass));
3367        assert_eq!(stats.passed, 1);
3368        assert_eq!(stats.passed_slow, 1);
3369        assert_eq!(stats.flaky, 0);
3370        assert_eq!(stats.failed, 0);
3371
3372        // Single-attempt failure (slow): counts as failed and failed_slow.
3373        let fail = make_execute_status_slow(
3374            ExecutionResultDescription::Fail {
3375                failure: FailureDescription::ExitCode { code: 1 },
3376                leaked: false,
3377            },
3378            1,
3379            1,
3380            true,
3381        );
3382        let stats = run_on_test_finished(&ExecutionStatuses::new(vec![fail], FlakyResult::Pass));
3383        assert_eq!(stats.failed, 1);
3384        assert_eq!(stats.failed_slow, 1);
3385        assert_eq!(stats.passed, 0);
3386        assert_eq!(stats.flaky, 0);
3387    }
3388}