nextest_runner/reporter/
events.rs

// Copyright (c) The nextest Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Events for the reporter.
//!
//! These types form the interface between the test runner and the test
//! reporter. The root structure for all events is [`TestEvent`].
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10use crate::{
11    config::{LeakTimeoutResult, ScriptId},
12    list::{TestInstance, TestInstanceId, TestList},
13    test_output::ChildExecutionOutput,
14};
15use chrono::{DateTime, FixedOffset};
16use nextest_metadata::MismatchReason;
17use quick_junit::ReportUuid;
18use std::{collections::BTreeMap, fmt, process::ExitStatus, time::Duration};
19
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}
35
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// The setup script index, starting from 0.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The command to run.
        command: &'a str,

        /// The arguments to the command.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// The script ID.
        script_id: ScriptId,

        /// The command to run.
        command: &'a str,

        /// The arguments to the command.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// The setup script index, starting from 0.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The command to run.
        command: &'a str,

        /// The arguments to the command.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_state: Option<CancelReason>,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,
    },

    /// A retry has started.
    TestRetryStarted {
        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,
    },

    /// A test finished running.
    TestFinished {
        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_state: Option<CancelReason>,
    },

    /// A test was skipped.
    TestSkipped {
        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_reason: Option<CancelReason>,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// The number of tests still running.
        running: usize,

        /// The reason this run was cancelled.
        reason: CancelReason,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// The number of tests still running.
        running: usize,

        /// The reason this run was killed.
        reason: CancelReason,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run.
        run_stats: RunStats,
    },
}
324
/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `setup_scripts_finished_count` at the
    /// end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of tests that timed out.
    pub timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,
}
387
388impl RunStats {
389    /// Returns true if there are any failures recorded in the stats.
390    pub fn has_failures(&self) -> bool {
391        self.failed_setup_script_count() > 0 || self.failed_count() > 0
392    }
393
394    /// Returns count of setup scripts that did not pass.
395    pub fn failed_setup_script_count(&self) -> usize {
396        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
397    }
398
399    /// Returns count of tests that did not pass.
400    pub fn failed_count(&self) -> usize {
401        self.failed + self.exec_failed + self.timed_out
402    }
403
404    /// Summarizes the stats as an enum at the end of a test run.
405    pub fn summarize_final(&self) -> FinalRunStats {
406        // Check for failures first. The order of setup scripts vs tests should not be important,
407        // though we don't assert that here.
408        if self.failed_setup_script_count() > 0 {
409            FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
410        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
411            FinalRunStats::Cancelled(RunStatsFailureKind::SetupScript)
412        } else if self.failed_count() > 0 {
413            FinalRunStats::Failed(RunStatsFailureKind::Test {
414                initial_run_count: self.initial_run_count,
415                not_run: self.initial_run_count.saturating_sub(self.finished_count),
416            })
417        } else if self.initial_run_count > self.finished_count {
418            FinalRunStats::Cancelled(RunStatsFailureKind::Test {
419                initial_run_count: self.initial_run_count,
420                not_run: self.initial_run_count.saturating_sub(self.finished_count),
421            })
422        } else if self.finished_count == 0 {
423            FinalRunStats::NoTestsRun
424        } else {
425            FinalRunStats::Success
426        }
427    }
428
429    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
430        self.setup_scripts_finished_count += 1;
431
432        match status.result {
433            ExecutionResult::Pass
434            | ExecutionResult::Leak {
435                result: LeakTimeoutResult::Pass,
436            } => {
437                self.setup_scripts_passed += 1;
438            }
439            ExecutionResult::Fail { .. }
440            | ExecutionResult::Leak {
441                result: LeakTimeoutResult::Fail,
442            } => {
443                self.setup_scripts_failed += 1;
444            }
445            ExecutionResult::ExecFail => {
446                self.setup_scripts_exec_failed += 1;
447            }
448            ExecutionResult::Timeout => {
449                self.setup_scripts_timed_out += 1;
450            }
451        }
452    }
453
454    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
455        self.finished_count += 1;
456        // run_statuses is guaranteed to have at least one element.
457        // * If the last element is success, treat it as success (and possibly flaky).
458        // * If the last element is a failure, use it to determine fail/exec fail.
459        // Note that this is different from what Maven Surefire does (use the first failure):
460        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
461        //
462        // This is not likely to matter much in practice since failures are likely to be of the
463        // same type.
464        let last_status = run_statuses.last_status();
465        match last_status.result {
466            ExecutionResult::Pass => {
467                self.passed += 1;
468                if last_status.is_slow {
469                    self.passed_slow += 1;
470                }
471                if run_statuses.len() > 1 {
472                    self.flaky += 1;
473                }
474            }
475            ExecutionResult::Leak {
476                result: LeakTimeoutResult::Pass,
477            } => {
478                self.passed += 1;
479                self.leaky += 1;
480                if last_status.is_slow {
481                    self.passed_slow += 1;
482                }
483                if run_statuses.len() > 1 {
484                    self.flaky += 1;
485                }
486            }
487            ExecutionResult::Leak {
488                result: LeakTimeoutResult::Fail,
489            } => {
490                self.failed += 1;
491                self.leaky_failed += 1;
492                if last_status.is_slow {
493                    self.failed_slow += 1;
494                }
495            }
496            ExecutionResult::Fail { .. } => {
497                self.failed += 1;
498                if last_status.is_slow {
499                    self.failed_slow += 1;
500                }
501            }
502            ExecutionResult::Timeout => self.timed_out += 1,
503            ExecutionResult::ExecFail => self.exec_failed += 1,
504        }
505    }
506}
507
/// A type summarizing the possible outcomes of a test run.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled(RunStatsFailureKind),

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}
523
/// A type summarizing the step at which a test run failed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
540
/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// The statuses of all attempts, in execution order. This is guaranteed to
    /// be non-empty.
    statuses: Vec<ExecuteStatus>,
}
547
548#[expect(clippy::len_without_is_empty)] // RunStatuses is never empty
549impl ExecutionStatuses {
550    pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
551        Self { statuses }
552    }
553
554    /// Returns the last execution status.
555    ///
556    /// This status is typically used as the final result.
557    pub fn last_status(&self) -> &ExecuteStatus {
558        self.statuses
559            .last()
560            .expect("execution statuses is non-empty")
561    }
562
563    /// Iterates over all the statuses.
564    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
565        self.statuses.iter()
566    }
567
568    /// Returns the number of times the test was executed.
569    pub fn len(&self) -> usize {
570        self.statuses.len()
571    }
572
573    /// Returns a description of self.
574    pub fn describe(&self) -> ExecutionDescription<'_> {
575        let last_status = self.last_status();
576        if last_status.result.is_success() {
577            if self.statuses.len() > 1 {
578                ExecutionDescription::Flaky {
579                    last_status,
580                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
581                }
582            } else {
583                ExecutionDescription::Success {
584                    single_status: last_status,
585                }
586            }
587        } else {
588            let first_status = self
589                .statuses
590                .first()
591                .expect("execution statuses is non-empty");
592            let retries = &self.statuses[1..];
593            ExecutionDescription::Failure {
594                first_status,
595                last_status,
596                retries,
597            }
598        }
599    }
600}
601
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}
636
637impl<'a> ExecutionDescription<'a> {
638    /// Returns the status level for this `ExecutionDescription`.
639    pub fn status_level(&self) -> StatusLevel {
640        match self {
641            ExecutionDescription::Success { single_status } => match single_status.result {
642                ExecutionResult::Leak {
643                    result: LeakTimeoutResult::Pass,
644                } => StatusLevel::Leak,
645                ExecutionResult::Pass => StatusLevel::Pass,
646                other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
647            },
648            // A flaky test implies that we print out retry information for it.
649            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
650            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
651        }
652    }
653
654    /// Returns the final status level for this `ExecutionDescription`.
655    pub fn final_status_level(&self) -> FinalStatusLevel {
656        match self {
657            ExecutionDescription::Success { single_status, .. } => {
658                // Slow is higher priority than leaky, so return slow first here.
659                if single_status.is_slow {
660                    FinalStatusLevel::Slow
661                } else {
662                    match single_status.result {
663                        ExecutionResult::Pass => FinalStatusLevel::Pass,
664                        ExecutionResult::Leak {
665                            result: LeakTimeoutResult::Pass,
666                        } => FinalStatusLevel::Leak,
667                        other => {
668                            unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
669                        }
670                    }
671                }
672            }
673            // A flaky test implies that we print out retry information for it.
674            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
675            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
676        }
677    }
678
679    /// Returns the last run status.
680    pub fn last_status(&self) -> &'a ExecuteStatus {
681        match self {
682            ExecutionDescription::Success {
683                single_status: last_status,
684            }
685            | ExecutionDescription::Flaky { last_status, .. }
686            | ExecutionDescription::Failure { last_status, .. } => last_status,
687        }
688    }
689}
690
/// Information about a single execution of a test.
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay before this attempt started. This will be non-zero if this is
    /// a retry and a delay was specified.
    pub delay_before_start: Duration,
}
709
/// Information about the execution of a setup script.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}
734
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script.
    pub env_map: BTreeMap<String, String>,
}
743
/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: usize,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: usize,
}
753
754impl RetryData {
755    /// Returns true if there are no more attempts after this.
756    pub fn is_last_attempt(&self) -> bool {
757        self.attempt >= self.total_attempts
758    }
759}
760
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherits standard IO was created, but it didn't shut
    /// down when the test exited.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true }` and
        /// `Leak { result: LeakTimeoutResult::Fail }`. In the former case, the
        /// test failed and also leaked handles. In the latter case, the test
        /// passed but leaked handles, and configuration indicated that this is
        /// a failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherits standard IO was created, but it didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout,
}
793
794impl ExecutionResult {
795    /// Returns true if the test was successful.
796    pub fn is_success(self) -> bool {
797        match self {
798            ExecutionResult::Pass
799            | ExecutionResult::Leak {
800                result: LeakTimeoutResult::Pass,
801            } => true,
802            ExecutionResult::Leak {
803                result: LeakTimeoutResult::Fail,
804            }
805            | ExecutionResult::Fail { .. }
806            | ExecutionResult::ExecFail
807            | ExecutionResult::Timeout => false,
808        }
809    }
810}
811
/// Failure status: either an exit code or an abort status.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted.
    Abort(AbortStatus),
}
821
822impl FailureStatus {
823    /// Extract the failure status from an `ExitStatus`.
824    pub fn extract(exit_status: ExitStatus) -> Self {
825        if let Some(abort_status) = AbortStatus::extract(exit_status) {
826            FailureStatus::Abort(abort_status)
827        } else {
828            FailureStatus::ExitCode(
829                exit_status
830                    .code()
831                    .expect("if abort_status is None, then code must be present"),
832            )
833        }
834    }
835}
836
/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}
854
impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    ///
    /// Returns `None` if the process did not abort (i.e. it exited with a
    /// regular exit code), or on platforms where aborts cannot be detected.
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                // On Unix, extract the signal if it's found.
                use std::os::unix::process::ExitStatusExt;
                exit_status.signal().map(AbortStatus::UnixSignal)
            } else if #[cfg(windows)] {
                // On Windows, a negative exit code (high bit set) is treated as
                // an NTSTATUS value, indicating an abnormal termination.
                exit_status.code().and_then(|code| {
                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
                })
            } else {
                // On other platforms there is no way to detect an abort from
                // the exit status alone.
                None
            }
        }
    }
}
873
874impl fmt::Debug for AbortStatus {
875    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
876        match self {
877            #[cfg(unix)]
878            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({})", signal),
879            #[cfg(windows)]
880            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({:x})", status),
881            #[cfg(windows)]
882            AbortStatus::JobObject => write!(f, "JobObject"),
883        }
884    }
885}
886
// Note: the order here matters -- it indicates severity of cancellation, so
// the derived `Ord`/`PartialOrd` depend on the declaration order below.
/// The reason why a test run is being cancelled.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
910
911impl CancelReason {
912    pub(crate) fn to_static_str(self) -> &'static str {
913        match self {
914            CancelReason::SetupScriptFailure => "setup script failure",
915            CancelReason::TestFailure => "test failure",
916            CancelReason::ReportError => "reporting error",
917            CancelReason::Signal => "signal",
918            CancelReason::Interrupt => "interrupt",
919            CancelReason::SecondSignal => "second signal",
920        }
921    }
922}
/// The kind of unit of work that nextest is executing.
///
/// Used to select between test- and script-specific messages in the reporter.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}
932
933impl UnitKind {
934    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
935    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";
936
937    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
938    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";
939
940    pub(crate) fn waiting_on_message(&self) -> &'static str {
941        match self {
942            UnitKind::Test => Self::WAITING_ON_TEST_MESSAGE,
943            UnitKind::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
944        }
945    }
946
947    pub(crate) fn executing_message(&self) -> &'static str {
948        match self {
949            UnitKind::Test => Self::EXECUTING_TEST_MESSAGE,
950            UnitKind::Script => Self::EXECUTING_SCRIPT_MESSAGE,
951        }
952    }
953}
954
955impl fmt::Display for UnitKind {
956    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
957        match self {
958            UnitKind::Script => write!(f, "script"),
959            UnitKind::Test => write!(f, "test"),
960        }
961    }
962}
963
/// A response to an information request.
///
/// One variant per kind of unit that can be queried; see
/// [`SetupScriptInfoResponse`] and [`TestInfoResponse`] for the payloads.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
973
/// A setup script's response to an information request.
///
/// Carried by [`InfoResponse::SetupScript`].
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The command to run.
    pub command: &'a str,

    /// The list of arguments to the command.
    pub args: &'a [String],

    /// The state of the setup script at the time of the request.
    pub state: UnitState,

    /// Output obtained from the setup script so far.
    pub output: ChildExecutionOutput,
}
992
/// A test's response to an information request.
///
/// Carried by [`InfoResponse::Test`].
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test at the time of the request.
    pub state: UnitState,

    /// Output obtained from the test so far.
    pub output: ChildExecutionOutput,
}
1008
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    ///
    /// Details (reason, method, grace period) are in the inner state.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    ///
    /// No process is running in this state.
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
1086
1087impl UnitState {
1088    /// Returns true if the state has a valid output attached to it.
1089    pub fn has_valid_output(&self) -> bool {
1090        match self {
1091            UnitState::Running { .. }
1092            | UnitState::Exiting { .. }
1093            | UnitState::Terminating(_)
1094            | UnitState::Exited { .. } => true,
1095            UnitState::DelayBeforeNextAttempt { .. } => false,
1096        }
1097    }
1098}
1099
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
1123
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests; see also [`UnitTerminateMethod`]
/// for *how* the termination is performed.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}
1138
1139impl fmt::Display for UnitTerminateReason {
1140    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1141        match self {
1142            UnitTerminateReason::Timeout => write!(f, "timeout"),
1143            UnitTerminateReason::Signal => write!(f, "signal"),
1144            UnitTerminateReason::Interrupt => write!(f, "interrupt"),
1145        }
1146    }
1147}
1148
/// The way in which a script or test is being forcibly terminated by nextest.
///
/// The available methods are platform-specific; see also
/// [`UnitTerminateReason`] for *why* the termination happens.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing.
    #[cfg(test)]
    Fake,
}
1174
#[cfg(unix)]
/// The signal that is or was sent to terminate a script or test.
///
/// Part of [`UnitTerminateMethod::Signal`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}
1194
1195#[cfg(unix)]
1196impl fmt::Display for UnitTerminateSignal {
1197    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1198        match self {
1199            UnitTerminateSignal::Interrupt => write!(f, "SIGINT"),
1200            UnitTerminateSignal::Term => write!(f, "SIGTERM"),
1201            UnitTerminateSignal::Hangup => write!(f, "SIGHUP"),
1202            UnitTerminateSignal::Quit => write!(f, "SIGQUIT"),
1203            UnitTerminateSignal::Kill => write!(f, "SIGKILL"),
1204        }
1205    }
1206}
1207
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks that `RunStats::summarize_final` classifies runs correctly:
    /// success, cancellation (not all units finished), and the various kinds
    /// of failure, for both tests and setup scripts.
    #[test]
    fn test_is_success() {
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 1
            }),
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        // Setup-script cases below.
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled(RunStatsFailureKind::SetupScript),
            "setup scripts not all finished => cancelled"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}
1352}