nextest_runner/reporter/events.rs
// Copyright (c) The nextest Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0

//! Events for the reporter.
//!
//! These types form the interface between the test runner and the test
//! reporter. The root structure for all events is [`TestEvent`].
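//!
//! A minimal sketch of how a consumer might dispatch on incoming events
//! (illustrative only; `handle_test_finished` and `handle_run_finished` are
//! hypothetical helpers, not part of this crate):
//!
//! ```ignore
//! fn handle_event(event: &TestEvent<'_>) {
//!     match &event.kind {
//!         TestEventKind::TestFinished {
//!             test_instance,
//!             run_statuses,
//!             ..
//!         } => handle_test_finished(test_instance, run_statuses),
//!         TestEventKind::RunFinished { run_stats, .. } => handle_run_finished(run_stats),
//!         // Most consumers only care about a handful of event kinds.
//!         _ => {}
//!     }
//! }
//! ```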

use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
use crate::{
    config::{LeakTimeoutResult, ScriptId},
    list::{TestInstance, TestInstanceId, TestList},
    test_output::ChildExecutionOutput,
};
use chrono::{DateTime, FixedOffset};
use nextest_metadata::MismatchReason;
use quick_junit::ReportUuid;
use std::{collections::BTreeMap, fmt, process::ExitStatus, time::Duration};

/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}

/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_state: Option<CancelReason>,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,
    },

    /// A retry has started.
    TestRetryStarted {
        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,
    },

    /// A test finished running.
    TestFinished {
        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_state: Option<CancelReason>,
    },

    /// A test was skipped.
    TestSkipped {
        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_reason: Option<CancelReason>,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,

        /// The reason this run was cancelled.
        reason: CancelReason,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,

        /// The reason this run was killed.
        reason: CancelReason,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run.
        run_stats: RunStats,
    },
}

/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than
    /// `setup_scripts_finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of tests that timed out.
    pub timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,
}

impl RunStats {
    /// Returns true if there are any failures recorded in the stats.
    pub fn has_failures(&self) -> bool {
        self.failed_setup_script_count() > 0 || self.failed_count() > 0
    }

    /// Returns count of setup scripts that did not pass.
    pub fn failed_setup_script_count(&self) -> usize {
        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
    }

    /// Returns count of tests that did not pass.
    pub fn failed_count(&self) -> usize {
        self.failed + self.exec_failed + self.timed_out
    }

    /// Summarizes the stats as an enum at the end of a test run.
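    ///
    /// A minimal sketch of acting on the summary (illustrative only; the exit
    /// codes below are assumptions for the example, not values defined by
    /// nextest):
    ///
    /// ```ignore
    /// fn exit_code_for(stats: &RunStats) -> i32 {
    ///     match stats.summarize_final() {
    ///         FinalRunStats::Success | FinalRunStats::NoTestsRun => 0,
    ///         FinalRunStats::Failed(_) | FinalRunStats::Cancelled(_) => 1,
    ///     }
    /// }
    /// ```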
    pub fn summarize_final(&self) -> FinalRunStats {
        // Check for failures first. The order of setup scripts vs tests should not be important,
        // though we don't assert that here.
        if self.failed_setup_script_count() > 0 {
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
            FinalRunStats::Cancelled(RunStatsFailureKind::SetupScript)
        } else if self.failed_count() > 0 {
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            })
        } else if self.initial_run_count > self.finished_count {
            FinalRunStats::Cancelled(RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            })
        } else if self.finished_count == 0 {
            FinalRunStats::NoTestsRun
        } else {
            FinalRunStats::Success
        }
    }

    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
        self.setup_scripts_finished_count += 1;

        match status.result {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.setup_scripts_passed += 1;
            }
            ExecutionResult::Fail { .. }
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.setup_scripts_failed += 1;
            }
            ExecutionResult::ExecFail => {
                self.setup_scripts_exec_failed += 1;
            }
            ExecutionResult::Timeout => {
                self.setup_scripts_timed_out += 1;
            }
        }
    }

    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
        self.finished_count += 1;
        // run_statuses is guaranteed to have at least one element.
        // * If the last element is success, treat it as success (and possibly flaky).
        // * If the last element is a failure, use it to determine fail/exec fail.
        // Note that this is different from what Maven Surefire does (use the first failure):
        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
        //
        // This is not likely to matter much in practice since failures are likely to be of the
        // same type.
        let last_status = run_statuses.last_status();
        match last_status.result {
            ExecutionResult::Pass => {
                self.passed += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.leaky += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.failed += 1;
                self.leaky_failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Fail { .. } => {
                self.failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Timeout => self.timed_out += 1,
            ExecutionResult::ExecFail => self.exec_failed += 1,
        }
    }
}

/// A type summarizing the possible outcomes of a test run.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled(RunStatsFailureKind),

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}

/// A type summarizing the step at which a test run failed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}

/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// This is guaranteed to be non-empty.
    statuses: Vec<ExecuteStatus>,
}

#[expect(clippy::len_without_is_empty)] // ExecutionStatuses is never empty
impl ExecutionStatuses {
    pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
        Self { statuses }
    }

    /// Returns the last execution status.
    ///
    /// This status is typically used as the final result.
    pub fn last_status(&self) -> &ExecuteStatus {
        self.statuses
            .last()
            .expect("execution statuses is non-empty")
    }

    /// Iterates over all the statuses.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
        self.statuses.iter()
    }

    /// Returns the number of times the test was executed.
    pub fn len(&self) -> usize {
        self.statuses.len()
    }

    /// Returns a description of self.
    pub fn describe(&self) -> ExecutionDescription<'_> {
        let last_status = self.last_status();
        if last_status.result.is_success() {
            if self.statuses.len() > 1 {
                ExecutionDescription::Flaky {
                    last_status,
                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
                }
            } else {
                ExecutionDescription::Success {
                    single_status: last_status,
                }
            }
        } else {
            let first_status = self
                .statuses
                .first()
                .expect("execution statuses is non-empty");
            let retries = &self.statuses[1..];
            ExecutionDescription::Failure {
                first_status,
                last_status,
                retries,
            }
        }
    }
}

/// A description of test executions obtained from [`ExecutionStatuses`].
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
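///
/// A minimal sketch of turning a description into a human-readable summary
/// (illustrative only; `describe_outcome` is a hypothetical helper):
///
/// ```ignore
/// fn describe_outcome(statuses: &ExecutionStatuses) -> String {
///     match statuses.describe() {
///         ExecutionDescription::Success { .. } => "passed".to_owned(),
///         ExecutionDescription::Flaky { prior_statuses, .. } => {
///             format!("passed after {} failed attempt(s)", prior_statuses.len())
///         }
///         ExecutionDescription::Failure { last_status, .. } => {
///             format!("failed with {:?}", last_status.result)
///         }
///     }
/// }
/// ```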
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}

impl<'a> ExecutionDescription<'a> {
    /// Returns the status level for this `ExecutionDescription`.
    pub fn status_level(&self) -> StatusLevel {
        match self {
            ExecutionDescription::Success { single_status } => match single_status.result {
                ExecutionResult::Leak {
                    result: LeakTimeoutResult::Pass,
                } => StatusLevel::Leak,
                ExecutionResult::Pass => StatusLevel::Pass,
                other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
            },
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
        }
    }

    /// Returns the final status level for this `ExecutionDescription`.
    pub fn final_status_level(&self) -> FinalStatusLevel {
        match self {
            ExecutionDescription::Success { single_status, .. } => {
                // Slow is higher priority than leaky, so return slow first here.
                if single_status.is_slow {
                    FinalStatusLevel::Slow
                } else {
                    match single_status.result {
                        ExecutionResult::Pass => FinalStatusLevel::Pass,
                        ExecutionResult::Leak {
                            result: LeakTimeoutResult::Pass,
                        } => FinalStatusLevel::Leak,
                        other => {
                            unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
                        }
                    }
                }
            }
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
        }
    }

    /// Returns the last run status.
    pub fn last_status(&self) -> &'a ExecuteStatus {
        match self {
            ExecutionDescription::Success {
                single_status: last_status,
            }
            | ExecutionDescription::Flaky { last_status, .. }
            | ExecutionDescription::Failure { last_status, .. } => last_status,
        }
    }
}

/// Information about a single execution of a test.
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay will be non-zero if this is a retry and delay was specified.
    pub delay_before_start: Duration,
}

/// Information about the execution of a setup script.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}

/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script.
    pub env_map: BTreeMap<String, String>,
}

/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: usize,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: usize,
}

impl RetryData {
    /// Returns true if there are no more attempts after this.
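    ///
    /// For example, with two retries configured, the attempts are numbered 1,
    /// 2, and 3, and only attempt 3 is the last one (illustrative only):
    ///
    /// ```ignore
    /// let retry_data = RetryData {
    ///     attempt: 2,
    ///     total_attempts: 3,
    /// };
    /// assert!(!retry_data.is_last_attempt());
    /// ```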
    pub fn is_last_attempt(&self) -> bool {
        self.attempt >= self.total_attempts
    }
}

/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherited standard IO was created, but it didn't shut
    /// down when the test exited.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true, .. }` and
        /// `Leak { result: LeakTimeoutResult::Fail }`. In the former case, the
        /// test failed and also leaked handles. In the latter case, the test
        /// passed but leaked handles, and configuration indicated that this is
        /// a failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether the test leaked handles. If set to true, this usually indicates
        /// that a subprocess that inherited standard IO was created, but it didn't
        /// shut down when the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout,
}

impl ExecutionResult {
    /// Returns true if the test was successful.
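    ///
    /// Both a plain pass and a leak that is configured to pass count as
    /// success (illustrative only):
    ///
    /// ```ignore
    /// assert!(ExecutionResult::Pass.is_success());
    /// assert!(
    ///     ExecutionResult::Leak {
    ///         result: LeakTimeoutResult::Pass,
    ///     }
    ///     .is_success()
    /// );
    /// assert!(!ExecutionResult::ExecFail.is_success());
    /// ```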
    pub fn is_success(self) -> bool {
        match self {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => true,
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            }
            | ExecutionResult::Fail { .. }
            | ExecutionResult::ExecFail
            | ExecutionResult::Timeout => false,
        }
    }
}

/// Failure status: either an exit code or an abort status.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted.
    Abort(AbortStatus),
}

impl FailureStatus {
    /// Extract the failure status from an `ExitStatus`.
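    ///
    /// A minimal sketch of classifying a child process's exit, assuming a Unix
    /// shell command is available (illustrative only):
    ///
    /// ```ignore
    /// use std::process::Command;
    ///
    /// let status = Command::new("false").status().expect("failed to spawn");
    /// if !status.success() {
    ///     match FailureStatus::extract(status) {
    ///         FailureStatus::ExitCode(code) => eprintln!("exited with code {code}"),
    ///         FailureStatus::Abort(abort) => eprintln!("aborted: {abort:?}"),
    ///     }
    /// }
    /// ```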
    pub fn extract(exit_status: ExitStatus) -> Self {
        if let Some(abort_status) = AbortStatus::extract(exit_status) {
            FailureStatus::Abort(abort_status)
        } else {
            FailureStatus::ExitCode(
                exit_status
                    .code()
                    .expect("if abort_status is None, then code must be present"),
            )
        }
    }
}

/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}

impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                // On Unix, extract the signal if it's found.
                use std::os::unix::process::ExitStatusExt;
                exit_status.signal().map(AbortStatus::UnixSignal)
            } else if #[cfg(windows)] {
                exit_status.code().and_then(|code| {
                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
                })
            } else {
                None
            }
        }
    }
}

impl fmt::Debug for AbortStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
            #[cfg(windows)]
            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
            #[cfg(windows)]
            AbortStatus::JobObject => write!(f, "JobObject"),
        }
    }
}

// Note: the order here matters -- it indicates severity of cancellation
/// The reason why a test run is being cancelled.
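///
/// Reasons are ordered by severity, so `Ord` comparisons can be used to decide
/// whether a newly received reason supersedes an earlier one. A minimal sketch
/// (illustrative only):
///
/// ```ignore
/// let mut current = Some(CancelReason::TestFailure);
/// let incoming = CancelReason::Interrupt;
/// if current.map_or(true, |reason| incoming > reason) {
///     current = Some(incoming);
/// }
/// assert_eq!(current, Some(CancelReason::Interrupt));
/// ```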
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}

impl CancelReason {
    pub(crate) fn to_static_str(self) -> &'static str {
        match self {
            CancelReason::SetupScriptFailure => "setup script failure",
            CancelReason::TestFailure => "test failure",
            CancelReason::ReportError => "reporting error",
            CancelReason::GlobalTimeout => "global timeout",
            CancelReason::Signal => "signal",
            CancelReason::Interrupt => "interrupt",
            CancelReason::SecondSignal => "second signal",
        }
    }
}

/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}

impl UnitKind {
    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";

    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";

    pub(crate) fn waiting_on_message(&self) -> &'static str {
        match self {
            UnitKind::Test => Self::WAITING_ON_TEST_MESSAGE,
            UnitKind::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
        }
    }

    pub(crate) fn executing_message(&self) -> &'static str {
        match self {
            UnitKind::Test => Self::EXECUTING_TEST_MESSAGE,
            UnitKind::Script => Self::EXECUTING_SCRIPT_MESSAGE,
        }
    }
}

impl fmt::Display for UnitKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            UnitKind::Script => write!(f, "script"),
            UnitKind::Test => write!(f, "test"),
        }
    }
}

/// A response to an information request.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}

/// A setup script's response to an information request.
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The state of the setup script.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}

/// A test's response to an information request.
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}

/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
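///
/// A minimal sketch of rendering a state for display (illustrative only;
/// `state_summary` is a hypothetical helper):
///
/// ```ignore
/// fn state_summary(state: &UnitState) -> String {
///     match state {
///         UnitState::Running { pid, time_taken, .. } => {
///             format!("running (pid {pid}) for {time_taken:?}")
///         }
///         UnitState::Exited { result, .. } => format!("exited: {result:?}"),
///         _ => "in transition".to_owned(),
///     }
/// }
/// ```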
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}

impl UnitState {
    /// Returns true if the state has a valid output attached to it.
    pub fn has_valid_output(&self) -> bool {
        match self {
            UnitState::Running { .. }
            | UnitState::Exiting { .. }
            | UnitState::Terminating(_)
            | UnitState::Exited { .. } => true,
            UnitState::DelayBeforeNextAttempt { .. } => false,
        }
    }
}

/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}

/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}

impl fmt::Display for UnitTerminateReason {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            UnitTerminateReason::Timeout => write!(f, "timeout"),
            UnitTerminateReason::Signal => write!(f, "signal"),
            UnitTerminateReason::Interrupt => write!(f, "interrupt"),
        }
    }
}

/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing.
    #[cfg(test)]
    Fake,
}

#[cfg(unix)]
/// The signal that is or was sent to terminate a script or test.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}

#[cfg(unix)]
impl fmt::Display for UnitTerminateSignal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            UnitTerminateSignal::Interrupt => write!(f, "SIGINT"),
            UnitTerminateSignal::Term => write!(f, "SIGTERM"),
            UnitTerminateSignal::Hangup => write!(f, "SIGHUP"),
            UnitTerminateSignal::Quit => write!(f, "SIGQUIT"),
            UnitTerminateSignal::Kill => write!(f, "SIGKILL"),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_is_success() {
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 1
            }),
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled(RunStatsFailureKind::SetupScript),
1314 "setup script failed => failure"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}