// nextest_runner/reporter/events.rs
1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Events for the reporter.
5//!
6//! These types form the interface between the test runner and the test
7//! reporter. The root structure for all events is [`TestEvent`].
8
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10use crate::{
11 config::{elements::LeakTimeoutResult, scripts::ScriptId},
12 list::{TestInstance, TestInstanceId, TestList},
13 runner::{StressCondition, StressCount},
14 test_output::ChildExecutionOutput,
15};
16use chrono::{DateTime, FixedOffset};
17use nextest_metadata::MismatchReason;
18use quick_junit::ReportUuid;
19use std::{collections::BTreeMap, fmt, num::NonZero, process::ExitStatus, time::Duration};
20
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is. See [`TestEventKind`] for the variants.
    pub kind: TestEventKind<'a>,
}
36
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_state: Option<CancelReason>,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        cancel_state: Option<CancelReason>,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,

        /// The cancel status of the run. This is None if the run is still ongoing.
        // NOTE(review): sibling variants call this field `cancel_state` --
        // confirm whether the naming difference is intentional.
        cancel_reason: Option<CancelReason>,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,

        /// The reason this run was cancelled.
        reason: CancelReason,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,

        /// The reason this run was killed.
        reason: CancelReason,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run (last sub-run if this is a stress test).
        run_stats: RunStats,
    },
}
379
/// Progress for a stress test.
#[derive(Clone, Debug)]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs (possibly infinite).
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}
407
408impl StressProgress {
409 /// Returns the remaining amount of work if the progress indicates there's
410 /// still more to do, otherwise `None`.
411 pub fn remaining(&self) -> Option<StressRemaining> {
412 match self {
413 Self::Count {
414 total: StressCount::Count(total),
415 elapsed: _,
416 completed,
417 } => total
418 .get()
419 .checked_sub(*completed)
420 .and_then(|remaining| NonZero::try_from(remaining).ok())
421 .map(StressRemaining::Count),
422 Self::Count {
423 total: StressCount::Infinite,
424 ..
425 } => Some(StressRemaining::Infinite),
426 Self::Time {
427 total,
428 elapsed,
429 completed: _,
430 } => total.checked_sub(*elapsed).map(StressRemaining::Time),
431 }
432 }
433}
434
/// For a stress test, the amount of time or number of stress runs remaining.
///
/// Returned by [`StressProgress::remaining`].
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}
447
/// The index of the current stress run.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    ///
    /// NOTE(review): presumably `None` for time-based or infinite stress runs
    /// -- confirm against the runner.
    pub total: Option<NonZero<u32>>,
}
457
/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than
    /// `setup_scripts_finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of tests that timed out.
    pub timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,
}
520
521impl RunStats {
522 /// Returns true if there are any failures recorded in the stats.
523 pub fn has_failures(&self) -> bool {
524 self.failed_setup_script_count() > 0 || self.failed_count() > 0
525 }
526
527 /// Returns count of setup scripts that did not pass.
528 pub fn failed_setup_script_count(&self) -> usize {
529 self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
530 }
531
532 /// Returns count of tests that did not pass.
533 pub fn failed_count(&self) -> usize {
534 self.failed + self.exec_failed + self.timed_out
535 }
536
537 /// Summarizes the stats as an enum at the end of a test run.
538 pub fn summarize_final(&self) -> FinalRunStats {
539 // Check for failures first. The order of setup scripts vs tests should not be important,
540 // though we don't assert that here.
541 if self.failed_setup_script_count() > 0 {
542 FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
543 } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
544 FinalRunStats::Cancelled(RunStatsFailureKind::SetupScript)
545 } else if self.failed_count() > 0 {
546 FinalRunStats::Failed(RunStatsFailureKind::Test {
547 initial_run_count: self.initial_run_count,
548 not_run: self.initial_run_count.saturating_sub(self.finished_count),
549 })
550 } else if self.initial_run_count > self.finished_count {
551 FinalRunStats::Cancelled(RunStatsFailureKind::Test {
552 initial_run_count: self.initial_run_count,
553 not_run: self.initial_run_count.saturating_sub(self.finished_count),
554 })
555 } else if self.finished_count == 0 {
556 FinalRunStats::NoTestsRun
557 } else {
558 FinalRunStats::Success
559 }
560 }
561
562 pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
563 self.setup_scripts_finished_count += 1;
564
565 match status.result {
566 ExecutionResult::Pass
567 | ExecutionResult::Leak {
568 result: LeakTimeoutResult::Pass,
569 } => {
570 self.setup_scripts_passed += 1;
571 }
572 ExecutionResult::Fail { .. }
573 | ExecutionResult::Leak {
574 result: LeakTimeoutResult::Fail,
575 } => {
576 self.setup_scripts_failed += 1;
577 }
578 ExecutionResult::ExecFail => {
579 self.setup_scripts_exec_failed += 1;
580 }
581 ExecutionResult::Timeout => {
582 self.setup_scripts_timed_out += 1;
583 }
584 }
585 }
586
587 pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
588 self.finished_count += 1;
589 // run_statuses is guaranteed to have at least one element.
590 // * If the last element is success, treat it as success (and possibly flaky).
591 // * If the last element is a failure, use it to determine fail/exec fail.
592 // Note that this is different from what Maven Surefire does (use the first failure):
593 // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
594 //
595 // This is not likely to matter much in practice since failures are likely to be of the
596 // same type.
597 let last_status = run_statuses.last_status();
598 match last_status.result {
599 ExecutionResult::Pass => {
600 self.passed += 1;
601 if last_status.is_slow {
602 self.passed_slow += 1;
603 }
604 if run_statuses.len() > 1 {
605 self.flaky += 1;
606 }
607 }
608 ExecutionResult::Leak {
609 result: LeakTimeoutResult::Pass,
610 } => {
611 self.passed += 1;
612 self.leaky += 1;
613 if last_status.is_slow {
614 self.passed_slow += 1;
615 }
616 if run_statuses.len() > 1 {
617 self.flaky += 1;
618 }
619 }
620 ExecutionResult::Leak {
621 result: LeakTimeoutResult::Fail,
622 } => {
623 self.failed += 1;
624 self.leaky_failed += 1;
625 if last_status.is_slow {
626 self.failed_slow += 1;
627 }
628 }
629 ExecutionResult::Fail { .. } => {
630 self.failed += 1;
631 if last_status.is_slow {
632 self.failed_slow += 1;
633 }
634 }
635 ExecutionResult::Timeout => self.timed_out += 1,
636 ExecutionResult::ExecFail => self.exec_failed += 1,
637 }
638 }
639}
640
/// A type summarizing the possible outcomes of a test run.
///
/// Produced by [`RunStats::summarize_final`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled(RunStatsFailureKind),

    /// At least one test or setup script failed.
    Failed(RunStatsFailureKind),
}
656
/// A type summarizing the step at which a test run failed.
///
/// Forms part of [`FinalRunStats`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
673
/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// Statuses in execution order; the last entry is the final result.
    ///
    /// This is guaranteed to be non-empty.
    statuses: Vec<ExecuteStatus>,
}
680
681#[expect(clippy::len_without_is_empty)] // RunStatuses is never empty
682impl ExecutionStatuses {
683 pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
684 Self { statuses }
685 }
686
687 /// Returns the last execution status.
688 ///
689 /// This status is typically used as the final result.
690 pub fn last_status(&self) -> &ExecuteStatus {
691 self.statuses
692 .last()
693 .expect("execution statuses is non-empty")
694 }
695
696 /// Iterates over all the statuses.
697 pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
698 self.statuses.iter()
699 }
700
701 /// Returns the number of times the test was executed.
702 pub fn len(&self) -> usize {
703 self.statuses.len()
704 }
705
706 /// Returns a description of self.
707 pub fn describe(&self) -> ExecutionDescription<'_> {
708 let last_status = self.last_status();
709 if last_status.result.is_success() {
710 if self.statuses.len() > 1 {
711 ExecutionDescription::Flaky {
712 last_status,
713 prior_statuses: &self.statuses[..self.statuses.len() - 1],
714 }
715 } else {
716 ExecutionDescription::Success {
717 single_status: last_status,
718 }
719 }
720 } else {
721 let first_status = self
722 .statuses
723 .first()
724 .expect("execution statuses is non-empty");
725 let retries = &self.statuses[1..];
726 ExecutionDescription::Failure {
727 first_status,
728 last_status,
729 retries,
730 }
731 }
732 }
733}
734
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
/// Obtained via [`ExecutionStatuses::describe`].
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}
769
770impl<'a> ExecutionDescription<'a> {
771 /// Returns the status level for this `ExecutionDescription`.
772 pub fn status_level(&self) -> StatusLevel {
773 match self {
774 ExecutionDescription::Success { single_status } => match single_status.result {
775 ExecutionResult::Leak {
776 result: LeakTimeoutResult::Pass,
777 } => StatusLevel::Leak,
778 ExecutionResult::Pass => StatusLevel::Pass,
779 other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
780 },
781 // A flaky test implies that we print out retry information for it.
782 ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
783 ExecutionDescription::Failure { .. } => StatusLevel::Fail,
784 }
785 }
786
787 /// Returns the final status level for this `ExecutionDescription`.
788 pub fn final_status_level(&self) -> FinalStatusLevel {
789 match self {
790 ExecutionDescription::Success { single_status, .. } => {
791 // Slow is higher priority than leaky, so return slow first here.
792 if single_status.is_slow {
793 FinalStatusLevel::Slow
794 } else {
795 match single_status.result {
796 ExecutionResult::Pass => FinalStatusLevel::Pass,
797 ExecutionResult::Leak {
798 result: LeakTimeoutResult::Pass,
799 } => FinalStatusLevel::Leak,
800 other => {
801 unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
802 }
803 }
804 }
805 }
806 // A flaky test implies that we print out retry information for it.
807 ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
808 ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
809 }
810 }
811
812 /// Returns the last run status.
813 pub fn last_status(&self) -> &'a ExecuteStatus {
814 match self {
815 ExecutionDescription::Success {
816 single_status: last_status,
817 }
818 | ExecutionDescription::Flaky { last_status, .. }
819 | ExecutionDescription::Failure { last_status, .. } => last_status,
820 }
821 }
822}
823
/// Information about a single execution of a test.
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data (which attempt this is, out of how many).
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay will be non-zero if this is a retry and delay was specified.
    pub delay_before_start: Duration,
}
842
/// Information about the execution of a setup script.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}
867
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script, keyed by variable name.
    pub env_map: BTreeMap<String, String>,
}
876
/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: usize,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: usize,
}

impl RetryData {
    /// Returns true if there are no more attempts after this.
    pub fn is_last_attempt(&self) -> bool {
        // Since `attempt` lives in `[1, total_attempts]`, equality is the
        // expected case; `<=` also covers out-of-range values defensively.
        self.total_attempts <= self.attempt
    }
}
893
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherited standard IO was created, but it didn't shut down when
    /// the test finished.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true, .. }` and
        /// `Leak { result: LeakTimeoutResult::Fail }`. In the former case, the
        /// test failed and also leaked handles. In the latter case, the test
        /// passed but leaked handles, and configuration indicated that this is
        /// a failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherited standard IO was created, but it didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout,
}
926
927impl ExecutionResult {
928 /// Returns true if the test was successful.
929 pub fn is_success(self) -> bool {
930 match self {
931 ExecutionResult::Pass
932 | ExecutionResult::Leak {
933 result: LeakTimeoutResult::Pass,
934 } => true,
935 ExecutionResult::Leak {
936 result: LeakTimeoutResult::Fail,
937 }
938 | ExecutionResult::Fail { .. }
939 | ExecutionResult::ExecFail
940 | ExecutionResult::Timeout => false,
941 }
942 }
943}
944
/// Failure status: either an exit code or an abort status.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted (see [`AbortStatus`]).
    Abort(AbortStatus),
}
954
955impl FailureStatus {
956 /// Extract the failure status from an `ExitStatus`.
957 pub fn extract(exit_status: ExitStatus) -> Self {
958 if let Some(abort_status) = AbortStatus::extract(exit_status) {
959 FailureStatus::Abort(abort_status)
960 } else {
961 FailureStatus::ExitCode(
962 exit_status
963 .code()
964 .expect("if abort_status is None, then code must be present"),
965 )
966 }
967 }
968}
969
/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}
987
impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    ///
    /// Returns `None` if the process exited with a regular exit code (and, on
    /// platforms that are neither Unix nor Windows, always).
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        cfg_if::cfg_if! {
            if #[cfg(unix)] {
                // On Unix, extract the signal if it's found.
                use std::os::unix::process::ExitStatusExt;
                exit_status.signal().map(AbortStatus::UnixSignal)
            } else if #[cfg(windows)] {
                // On Windows, a negative exit code (high bit set) is treated as
                // an NTSTATUS abort code.
                exit_status.code().and_then(|code| {
                    (code < 0).then_some(AbortStatus::WindowsNtStatus(code))
                })
            } else {
                // Unknown platform: no abort information is available.
                None
            }
        }
    }
}
1006
// Manual Debug impl so that the Windows NTSTATUS value is formatted in
// hexadecimal, which is how such status codes are conventionally written.
impl fmt::Debug for AbortStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            AbortStatus::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
            #[cfg(windows)]
            AbortStatus::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
            #[cfg(windows)]
            AbortStatus::JobObject => write!(f, "JobObject"),
        }
    }
}
1019
// Note: the order here matters -- it indicates severity of cancellation (the
// derived `Ord` relies on the variant order, with later variants more severe).
/// The reason why a test run is being cancelled.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
1046
1047impl CancelReason {
1048 pub(crate) fn to_static_str(self) -> &'static str {
1049 match self {
1050 CancelReason::SetupScriptFailure => "setup script failure",
1051 CancelReason::TestFailure => "test failure",
1052 CancelReason::ReportError => "reporting error",
1053 CancelReason::GlobalTimeout => "global timeout",
1054 CancelReason::Signal => "signal",
1055 CancelReason::Interrupt => "interrupt",
1056 CancelReason::SecondSignal => "second signal",
1057 }
1058 }
1059}
/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}

impl UnitKind {
    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";

    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";

    /// Returns the "waiting on ... process" message for this kind of unit.
    pub(crate) fn waiting_on_message(&self) -> &'static str {
        match self {
            Self::Test => Self::WAITING_ON_TEST_MESSAGE,
            Self::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
        }
    }

    /// Returns the "executing ..." message for this kind of unit.
    pub(crate) fn executing_message(&self) -> &'static str {
        match self {
            Self::Test => Self::EXECUTING_TEST_MESSAGE,
            Self::Script => Self::EXECUTING_SCRIPT_MESSAGE,
        }
    }
}

impl fmt::Display for UnitKind {
    /// Displays the kind as a lowercase noun: "test" or "script".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let noun = match self {
            Self::Test => "test",
            Self::Script => "script",
        };
        f.write_str(noun)
    }
}
1100
/// A response to an information request.
///
/// Wraps the per-unit-kind response types: [`SetupScriptInfoResponse`] for
/// setup scripts and [`TestInfoResponse`] for tests.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
1110
/// A setup script's response to an information request.
///
/// Part of [`InfoResponse`].
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The stress index of the setup script.
    ///
    /// NOTE(review): presumably `Some` only when the run has a stress
    /// condition attached (see `TestEventKind::RunStarted`) -- confirm
    /// against the runner.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The current state of the setup script; see [`UnitState`].
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}
1132
/// A test's response to an information request.
///
/// Part of [`InfoResponse`].
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test.
    ///
    /// NOTE(review): presumably `Some` only when the run has a stress
    /// condition attached (see `TestEventKind::RunStarted`) -- confirm
    /// against the runner.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The current state of the test; see [`UnitState`].
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}
1151
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests: carried by
/// [`SetupScriptInfoResponse`] and [`TestInfoResponse`].
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the test is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The test has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// None means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the test is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    ///
    /// This is the only state with no output attached to it; see
    /// [`UnitState::has_valid_output`].
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
1229
1230impl UnitState {
1231 /// Returns true if the state has a valid output attached to it.
1232 pub fn has_valid_output(&self) -> bool {
1233 match self {
1234 UnitState::Running { .. }
1235 | UnitState::Exiting { .. }
1236 | UnitState::Terminating(_)
1237 | UnitState::Exited { .. } => true,
1238 UnitState::DelayBeforeNextAttempt { .. } => false,
1239 }
1240 }
1241}
1242
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`]. Describes why and how the process is
/// being terminated, and how far along the termination is.
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination; see [`UnitTerminateReason`].
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated; see
    /// [`UnitTerminateMethod`].
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
1266
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}

impl fmt::Display for UnitTerminateReason {
    /// Displays the reason as a lowercase word.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let reason = match self {
            Self::Timeout => "timeout",
            Self::Signal => "signal",
            Self::Interrupt => "interrupt",
        };
        f.write_str(reason)
    }
}
1291
/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal; the specific signal
    /// is given by [`UnitTerminateSignal`].
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing. Only available in test builds.
    #[cfg(test)]
    Fake,
}
1317
/// The signal that is or was sent to terminate a script or test.
#[cfg(unix)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}

#[cfg(unix)]
impl fmt::Display for UnitTerminateSignal {
    /// Displays the conventional C name of the signal (e.g. "SIGTERM").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Interrupt => "SIGINT",
            Self::Term => "SIGTERM",
            Self::Hangup => "SIGHUP",
            Self::Quit => "SIGQUIT",
            Self::Kill => "SIGKILL",
        };
        f.write_str(name)
    }
}
1350
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks that `RunStats::summarize_final` classifies runs correctly:
    /// success, cancellation, failure, and no-tests-run, for both tests and
    /// setup scripts.
    #[test]
    fn test_is_success() {
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 1
            }),
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        // Setup script scenarios. Note that unfinished setup scripts indicate
        // a cancelled run, not a failed one.
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled(RunStatsFailureKind::SetupScript),
            "setup scripts not all finished => cancelled"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}