nextest_runner/reporter/events.rs
1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Events for the reporter.
5//!
6//! These types form the interface between the test runner and the test
7//! reporter. The root structure for all events is [`TestEvent`].
8
9use super::{FinalStatusLevel, StatusLevel, TestOutputDisplay};
10use crate::{
11 config::{
12 elements::{LeakTimeoutResult, SlowTimeoutResult},
13 scripts::ScriptId,
14 },
15 list::{TestInstance, TestInstanceId, TestList},
16 runner::{StressCondition, StressCount},
17 test_output::ChildExecutionOutput,
18};
19use chrono::{DateTime, FixedOffset};
20use nextest_metadata::MismatchReason;
21use quick_junit::ReportUuid;
22use std::{collections::BTreeMap, fmt, num::NonZero, process::ExitStatus, time::Duration};
23
/// A reporter event.
///
/// This is the top-level message delivered to the reporter: either a periodic
/// tick (used for time-based UI refreshes) or an actual test event.
#[derive(Clone, Debug)]
pub enum ReporterEvent<'a> {
    /// A periodic tick.
    Tick,

    /// A test event.
    ///
    /// Boxed to keep the size of `ReporterEvent` small, since `TestEvent` is
    /// a large type and `Tick` carries no data.
    Test(Box<TestEvent<'a>>),
}
/// A test event.
///
/// Events are produced by a [`TestRunner`](crate::runner::TestRunner) and
/// consumed by a [`Reporter`](crate::reporter::Reporter).
#[derive(Clone, Debug)]
pub struct TestEvent<'a> {
    /// The time at which the event was generated, including the offset from UTC.
    pub timestamp: DateTime<FixedOffset>,

    /// The amount of time elapsed since the start of the test run.
    pub elapsed: Duration,

    /// The kind of test event this is.
    pub kind: TestEventKind<'a>,
}
48
/// The kind of test event this is.
///
/// Forms part of [`TestEvent`].
#[derive(Clone, Debug)]
pub enum TestEventKind<'a> {
    /// The test run started.
    RunStarted {
        /// The list of tests that will be run.
        ///
        /// The methods on the test list indicate the number of tests that will be run.
        test_list: &'a TestList<'a>,

        /// The UUID for this run.
        run_id: ReportUuid,

        /// The nextest profile chosen for this run.
        profile_name: String,

        /// The command-line arguments for the process.
        cli_args: Vec<String>,

        /// The stress condition for this run, if any.
        stress_condition: Option<StressCondition>,
    },

    /// When running stress tests serially, a sub-run started.
    StressSubRunStarted {
        /// The amount of progress completed so far.
        progress: StressProgress,
    },

    /// A setup script started.
    SetupScriptStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index, starting from 0.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// True if some output from the setup script is being passed through.
        no_capture: bool,
    },

    /// A setup script was slow.
    SetupScriptSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// The amount of time elapsed since the start of execution.
        elapsed: Duration,

        /// True if the script has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A setup script completed execution.
    SetupScriptFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The setup script index, starting from 0.
        index: usize,

        /// The total number of setup scripts.
        total: usize,

        /// The script ID.
        script_id: ScriptId,

        /// The program to run.
        program: String,

        /// The arguments to the program.
        args: &'a [String],

        /// Whether the JUnit report should store success output for this script.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this script.
        junit_store_failure_output: bool,

        /// True if some output from the setup script was passed through.
        no_capture: bool,

        /// The execution status of the setup script.
        run_status: SetupScriptExecuteStatus,
    },

    // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to
    // do things, maybe a couple of reporter traits (one for the run as a whole and one for each
    // binary).
    /// A test started running.
    TestStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was started.
        test_instance: TestInstance<'a>,

        /// Current run statistics so far.
        current_stats: RunStats,

        /// The number of tests currently running, including this one.
        running: usize,
    },

    /// A test was slower than a configured soft timeout.
    TestSlow {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was slow.
        test_instance: TestInstance<'a>,

        /// Retry data.
        retry_data: RetryData,

        /// The amount of time that has elapsed since the beginning of the test.
        elapsed: Duration,

        /// True if the test has hit its timeout and is about to be terminated.
        will_terminate: bool,
    },

    /// A test attempt failed and will be retried in the future.
    ///
    /// This event does not occur on the final run of a failing test.
    TestAttemptFailedWillRetry {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// The status of this attempt to run the test. Will never be success.
        run_status: ExecuteStatus,

        /// The delay before the next attempt to run the test.
        delay_before_next_attempt: Duration,

        /// Whether failure outputs are printed out.
        failure_output: TestOutputDisplay,

        /// The current number of running tests.
        running: usize,
    },

    /// A retry has started.
    TestRetryStarted {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that is being retried.
        test_instance: TestInstance<'a>,

        /// Data related to retries.
        retry_data: RetryData,

        /// The current number of running tests.
        running: usize,
    },

    /// A test finished running.
    TestFinished {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that finished running.
        test_instance: TestInstance<'a>,

        /// Test setting for success output.
        success_output: TestOutputDisplay,

        /// Test setting for failure output.
        failure_output: TestOutputDisplay,

        /// Whether the JUnit report should store success output for this test.
        junit_store_success_output: bool,

        /// Whether the JUnit report should store failure output for this test.
        junit_store_failure_output: bool,

        /// Information about all the runs for this test, including retries.
        run_statuses: ExecutionStatuses,

        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests that are currently running, excluding this one.
        running: usize,
    },

    /// A test was skipped.
    TestSkipped {
        /// If a stress test is being run, the stress index, starting from 0.
        stress_index: Option<StressIndex>,

        /// The test instance that was skipped.
        test_instance: TestInstance<'a>,

        /// The reason this test was skipped.
        reason: MismatchReason,
    },

    /// An information request was received.
    InfoStarted {
        /// The number of tasks currently running. This is the same as the
        /// number of expected responses.
        total: usize,

        /// Statistics for the run.
        run_stats: RunStats,
    },

    /// Information about a script or test was received.
    InfoResponse {
        /// The index of the response, starting from 0.
        index: usize,

        /// The total number of responses expected.
        total: usize,

        /// The response itself.
        response: InfoResponse<'a>,
    },

    /// An information request was completed.
    InfoFinished {
        /// The number of responses that were not received. In most cases, this
        /// is 0.
        missing: usize,
    },

    /// `Enter` was pressed. Either a newline or a progress bar snapshot needs
    /// to be printed.
    InputEnter {
        /// Current statistics for number of tests so far.
        current_stats: RunStats,

        /// The number of tests running.
        running: usize,
    },

    /// A cancellation notice was received.
    RunBeginCancel {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A forcible kill was requested due to receiving a signal.
    RunBeginKill {
        /// The number of setup scripts still running.
        setup_scripts_running: usize,

        /// Current statistics for number of tests so far.
        ///
        /// `current_stats.cancel_reason` is set to `Some`.
        current_stats: RunStats,

        /// The number of tests still running.
        running: usize,
    },

    /// A SIGTSTP event was received and the run was paused.
    RunPaused {
        /// The number of setup scripts running.
        setup_scripts_running: usize,

        /// The number of tests currently running.
        running: usize,
    },

    /// A SIGCONT event was received and the run is being continued.
    RunContinued {
        /// The number of setup scripts that will be started up again.
        setup_scripts_running: usize,

        /// The number of tests that will be started up again.
        running: usize,
    },

    /// When running stress tests serially, a sub-run finished.
    StressSubRunFinished {
        /// The amount of progress completed so far.
        progress: StressProgress,

        /// The amount of time it took for this sub-run to complete.
        sub_elapsed: Duration,

        /// Statistics for the sub-run.
        sub_stats: RunStats,
    },

    /// The test run finished.
    RunFinished {
        /// The unique ID for this run.
        run_id: ReportUuid,

        /// The time at which the run was started.
        start_time: DateTime<FixedOffset>,

        /// The amount of time it took for the tests to run.
        elapsed: Duration,

        /// Statistics for the run, or overall statistics for stress tests.
        run_stats: RunFinishedStats,
    },
}
386
/// Progress for a stress test.
///
/// The variant mirrors the [`StressCondition`] the run was started with:
/// either a fixed number of sub-runs or a fixed wall-clock duration.
#[derive(Clone, Debug)]
pub enum StressProgress {
    /// This is a count-based stress run.
    Count {
        /// The total number of stress runs.
        total: StressCount,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },

    /// This is a time-based stress run.
    Time {
        /// The total time for the stress run.
        total: Duration,

        /// The total time that has elapsed across all stress runs so far.
        elapsed: Duration,

        /// The number of stress runs that have been completed.
        completed: u32,
    },
}
414
impl StressProgress {
    /// Returns the remaining amount of work if the progress indicates there's
    /// still more to do, otherwise `None`.
    pub fn remaining(&self) -> Option<StressRemaining> {
        match self {
            Self::Count {
                total: StressCount::Count(total),
                elapsed: _,
                completed,
            } => total
                .get()
                .checked_sub(*completed)
                // A remainder of 0 means the stress run is complete, so only
                // nonzero remainders map to `StressRemaining::Count`. (A
                // `completed` greater than `total` also yields `None`, via
                // `checked_sub`.)
                .and_then(|remaining| NonZero::try_from(remaining).ok())
                .map(StressRemaining::Count),
            Self::Count {
                total: StressCount::Infinite,
                ..
            } => Some(StressRemaining::Infinite),
            Self::Time {
                total,
                elapsed,
                completed: _,
            } => total.checked_sub(*elapsed).map(StressRemaining::Time),
        }
    }

    /// Returns a unique ID for this stress sub-run, consisting of the run ID and stress index.
    pub fn unique_id(&self, run_id: ReportUuid) -> String {
        // Both variants track the number of completed sub-runs, which doubles
        // as the 0-based index of the current sub-run.
        let stress_current = match self {
            Self::Count { completed, .. } | Self::Time { completed, .. } => *completed,
        };
        format!("{}:@stress-{}", run_id, stress_current)
    }
}
449
/// For a stress test, the amount of time or number of stress runs remaining.
///
/// Returned by [`StressProgress::remaining`].
#[derive(Clone, Debug)]
pub enum StressRemaining {
    /// The number of stress runs remaining, guaranteed to be non-zero.
    Count(NonZero<u32>),

    /// Infinite number of stress runs remaining.
    Infinite,

    /// The amount of time remaining.
    Time(Duration),
}
462
/// The index of the current stress run.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct StressIndex {
    /// The 0-indexed index.
    pub current: u32,

    /// The total number of stress runs, if that is available.
    ///
    /// `None` for time-based or infinite stress runs, where the total is not
    /// known up front.
    pub total: Option<NonZero<u32>>,
}
472
/// Statistics for a completed test run or stress run.
#[derive(Clone, Debug)]
pub enum RunFinishedStats {
    /// A single test run was completed.
    Single(RunStats),

    /// A stress run was completed.
    Stress(StressRunStats),
}
482
483impl RunFinishedStats {
484 /// For a single run, returns a summary of statistics as an enum. For a
485 /// stress run, returns a summary for the last sub-run.
486 pub fn final_stats(&self) -> FinalRunStats {
487 match self {
488 Self::Single(stats) => stats.summarize_final(),
489 Self::Stress(stats) => stats.last_final_stats,
490 }
491 }
492}
493
/// Statistics for a test run.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct RunStats {
    /// The total number of tests that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than `finished_count` at the end.
    pub initial_run_count: usize,

    /// The total number of tests that finished running.
    pub finished_count: usize,

    /// The total number of setup scripts that were expected to be run at the beginning.
    ///
    /// If the test run is cancelled, this will be more than
    /// `setup_scripts_finished_count` at the end.
    pub setup_scripts_initial_count: usize,

    /// The total number of setup scripts that finished running.
    pub setup_scripts_finished_count: usize,

    /// The number of setup scripts that passed.
    pub setup_scripts_passed: usize,

    /// The number of setup scripts that failed.
    pub setup_scripts_failed: usize,

    /// The number of setup scripts that encountered an execution failure.
    pub setup_scripts_exec_failed: usize,

    /// The number of setup scripts that timed out.
    pub setup_scripts_timed_out: usize,

    /// The number of tests that passed. Includes `passed_slow`, `passed_timed_out`, `flaky` and `leaky`.
    pub passed: usize,

    /// The number of slow tests that passed.
    pub passed_slow: usize,

    /// The number of timed out tests that passed.
    pub passed_timed_out: usize,

    /// The number of tests that passed on retry.
    pub flaky: usize,

    /// The number of tests that failed. Includes `leaky_failed`.
    pub failed: usize,

    /// The number of failed tests that were slow.
    pub failed_slow: usize,

    /// The number of timed out tests that failed.
    pub failed_timed_out: usize,

    /// The number of tests that passed but leaked handles.
    pub leaky: usize,

    /// The number of tests that otherwise passed, but leaked handles and were
    /// treated as failed as a result.
    pub leaky_failed: usize,

    /// The number of tests that encountered an execution failure.
    pub exec_failed: usize,

    /// The number of tests that were skipped.
    pub skipped: usize,

    /// If the run is cancelled, the reason the cancellation is happening.
    pub cancel_reason: Option<CancelReason>,
}
562
impl RunStats {
    /// Returns true if there are any failures recorded in the stats.
    pub fn has_failures(&self) -> bool {
        self.failed_setup_script_count() > 0 || self.failed_count() > 0
    }

    /// Returns count of setup scripts that did not pass.
    pub fn failed_setup_script_count(&self) -> usize {
        self.setup_scripts_failed + self.setup_scripts_exec_failed + self.setup_scripts_timed_out
    }

    /// Returns count of tests that did not pass.
    pub fn failed_count(&self) -> usize {
        self.failed + self.exec_failed + self.failed_timed_out
    }

    /// Summarizes the stats as an enum at the end of a test run.
    pub fn summarize_final(&self) -> FinalRunStats {
        // Check for failures first. The order of setup scripts vs tests should
        // not be important, though we don't assert that here.
        if self.failed_setup_script_count() > 0 {
            // Is this related to a cancellation other than one directly caused
            // by the failure? (`None` compares less than any `Some`, so an
            // unset reason never triggers this branch.)
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind: RunStatsFailureKind::SetupScript,
                }
            } else {
                FinalRunStats::Failed(RunStatsFailureKind::SetupScript)
            }
        } else if self.setup_scripts_initial_count > self.setup_scripts_finished_count {
            // Not all setup scripts ran to completion, so the run must have
            // been interrupted during setup.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::SetupScript,
            }
        } else if self.failed_count() > 0 {
            let kind = RunStatsFailureKind::Test {
                initial_run_count: self.initial_run_count,
                not_run: self.initial_run_count.saturating_sub(self.finished_count),
            };

            // Is this related to a cancellation other than one directly caused
            // by the failure?
            if self.cancel_reason > Some(CancelReason::TestFailure) {
                FinalRunStats::Cancelled {
                    reason: self.cancel_reason,
                    kind,
                }
            } else {
                FinalRunStats::Failed(kind)
            }
        } else if self.initial_run_count > self.finished_count {
            // No failures, but some tests didn't run: the run was cancelled.
            FinalRunStats::Cancelled {
                reason: self.cancel_reason,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: self.initial_run_count,
                    not_run: self.initial_run_count.saturating_sub(self.finished_count),
                },
            }
        } else if self.finished_count == 0 {
            FinalRunStats::NoTestsRun
        } else {
            FinalRunStats::Success
        }
    }

    /// Records a finished setup script in the statistics.
    pub(crate) fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
        self.setup_scripts_finished_count += 1;

        match status.result {
            ExecutionResult::Pass
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.setup_scripts_passed += 1;
            }
            ExecutionResult::Fail { .. }
            | ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.setup_scripts_failed += 1;
            }
            ExecutionResult::ExecFail => {
                self.setup_scripts_exec_failed += 1;
            }
            // Timed out setup scripts are always treated as failures
            ExecutionResult::Timeout { .. } => {
                self.setup_scripts_timed_out += 1;
            }
        }
    }

    /// Records a finished test, including all of its attempts, in the
    /// statistics.
    pub(crate) fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
        self.finished_count += 1;
        // run_statuses is guaranteed to have at least one element.
        // * If the last element is success, treat it as success (and possibly flaky).
        // * If the last element is a failure, use it to determine fail/exec fail.
        // Note that this is different from what Maven Surefire does (use the first failure):
        // https://maven.apache.org/surefire/maven-surefire-plugin/examples/rerun-failing-tests.html
        //
        // This is not likely to matter much in practice since failures are likely to be of the
        // same type.
        let last_status = run_statuses.last_status();
        match last_status.result {
            ExecutionResult::Pass => {
                self.passed += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                // More than one attempt means earlier attempts failed: flaky.
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.leaky += 1;
                if last_status.is_slow {
                    self.passed_slow += 1;
                }
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Leak {
                result: LeakTimeoutResult::Fail,
            } => {
                self.failed += 1;
                self.leaky_failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Fail { .. } => {
                self.failed += 1;
                if last_status.is_slow {
                    self.failed_slow += 1;
                }
            }
            ExecutionResult::Timeout {
                result: SlowTimeoutResult::Pass,
            } => {
                self.passed += 1;
                self.passed_timed_out += 1;
                if run_statuses.len() > 1 {
                    self.flaky += 1;
                }
            }
            ExecutionResult::Timeout {
                result: SlowTimeoutResult::Fail,
            } => {
                // Note: counted via `failed_timed_out` only; `failed_count()`
                // sums it in alongside `failed` and `exec_failed`.
                self.failed_timed_out += 1;
            }
            ExecutionResult::ExecFail => self.exec_failed += 1,
        }
    }
}
722
/// A type summarizing the possible outcomes of a test run.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FinalRunStats {
    /// The test run was successful, or is successful so far.
    Success,

    /// The test run was successful, or is successful so far, but no tests were selected to run.
    NoTestsRun,

    /// The test run was cancelled.
    Cancelled {
        /// The reason for cancellation, if available.
        ///
        /// This should generally be available, but may be None if some tests
        /// that were selected to run were not executed.
        reason: Option<CancelReason>,

        /// The kind of failure that occurred.
        kind: RunStatsFailureKind,
    },

    /// At least one test failed.
    Failed(RunStatsFailureKind),
}
747
/// Statistics for a stress run.
#[derive(Clone, Debug)]
pub struct StressRunStats {
    /// The progress of completed stress runs (index and total, if known).
    pub completed: StressIndex,

    /// The number of stress runs that succeeded.
    pub success_count: u32,

    /// The number of stress runs that failed.
    pub failed_count: u32,

    /// The last stress run's `FinalRunStats`.
    pub last_final_stats: FinalRunStats,
}
763
764impl StressRunStats {
765 /// Summarizes the stats as an enum at the end of a test run.
766 pub fn summarize_final(&self) -> StressFinalRunStats {
767 if self.failed_count > 0 {
768 StressFinalRunStats::Failed
769 } else if matches!(self.last_final_stats, FinalRunStats::Cancelled { .. }) {
770 StressFinalRunStats::Cancelled
771 } else if matches!(self.last_final_stats, FinalRunStats::NoTestsRun) {
772 StressFinalRunStats::NoTestsRun
773 } else {
774 StressFinalRunStats::Success
775 }
776 }
777}
778
/// A summary of final statistics for a stress run.
///
/// Returned by [`StressRunStats::summarize_final`].
//
// Derives match the sibling summary enum `FinalRunStats`: public summary
// types should be inspectable (`Debug`) and comparable in tests.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum StressFinalRunStats {
    /// The stress run was successful.
    Success,

    /// No tests were run.
    NoTestsRun,

    /// The stress run was cancelled.
    Cancelled,

    /// At least one stress run failed.
    Failed,
}
793
/// A type summarizing the step at which a test run failed.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RunStatsFailureKind {
    /// The run was interrupted during setup script execution.
    SetupScript,

    /// The run was interrupted during test execution.
    Test {
        /// The total number of tests scheduled.
        initial_run_count: usize,

        /// The number of tests not run, or for a currently-executing test the number queued up to
        /// run.
        not_run: usize,
    },
}
810
/// Information about executions of a test, including retries.
#[derive(Clone, Debug)]
pub struct ExecutionStatuses {
    /// This is guaranteed to be non-empty: one entry per attempt, in order.
    statuses: Vec<ExecuteStatus>,
}
817
#[expect(clippy::len_without_is_empty)] // ExecutionStatuses is never empty
impl ExecutionStatuses {
    /// Creates a new `ExecutionStatuses`.
    ///
    /// Callers must pass a non-empty vector: `last_status` and `describe`
    /// rely on at least one status being present.
    pub(crate) fn new(statuses: Vec<ExecuteStatus>) -> Self {
        Self { statuses }
    }

    /// Returns the last execution status.
    ///
    /// This status is typically used as the final result.
    pub fn last_status(&self) -> &ExecuteStatus {
        self.statuses
            .last()
            .expect("execution statuses is non-empty")
    }

    /// Iterates over all the statuses.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &'_ ExecuteStatus> + '_ {
        self.statuses.iter()
    }

    /// Returns the number of times the test was executed.
    pub fn len(&self) -> usize {
        self.statuses.len()
    }

    /// Returns a description of self.
    pub fn describe(&self) -> ExecutionDescription<'_> {
        let last_status = self.last_status();
        if last_status.result.is_success() {
            // A successful final attempt after earlier attempts means the
            // earlier attempts failed: the test was flaky.
            if self.statuses.len() > 1 {
                ExecutionDescription::Flaky {
                    last_status,
                    prior_statuses: &self.statuses[..self.statuses.len() - 1],
                }
            } else {
                ExecutionDescription::Success {
                    single_status: last_status,
                }
            }
        } else {
            let first_status = self
                .statuses
                .first()
                .expect("execution statuses is non-empty");
            // All statuses after the first are retries; may be empty.
            let retries = &self.statuses[1..];
            ExecutionDescription::Failure {
                first_status,
                last_status,
                retries,
            }
        }
    }
}
871
/// A description of test executions obtained from `ExecuteStatuses`.
///
/// This can be used to quickly determine whether a test passed, failed or was flaky.
#[derive(Copy, Clone, Debug)]
pub enum ExecutionDescription<'a> {
    /// The test was run once and was successful.
    Success {
        /// The status of the test.
        single_status: &'a ExecuteStatus,
    },

    /// The test was run more than once. The final result was successful.
    Flaky {
        /// The last, successful status.
        last_status: &'a ExecuteStatus,

        /// Previous statuses, none of which are successes.
        prior_statuses: &'a [ExecuteStatus],
    },

    /// The test was run once, or possibly multiple times. All runs failed.
    Failure {
        /// The first, failing status.
        first_status: &'a ExecuteStatus,

        /// The last, failing status. Same as the first status if no retries were performed.
        last_status: &'a ExecuteStatus,

        /// Any retries that were performed. All of these runs failed.
        ///
        /// May be empty.
        retries: &'a [ExecuteStatus],
    },
}
906
impl<'a> ExecutionDescription<'a> {
    /// Returns the status level for this `ExecutionDescription`.
    pub fn status_level(&self) -> StatusLevel {
        match self {
            // `Success` is only constructed (in `ExecutionStatuses::describe`)
            // for results where `is_success()` is true; `Timeout { Pass }` is
            // also a success, but `unreachable!` here documents that it isn't
            // expected to reach this path in practice.
            ExecutionDescription::Success { single_status } => match single_status.result {
                ExecutionResult::Leak {
                    result: LeakTimeoutResult::Pass,
                } => StatusLevel::Leak,
                ExecutionResult::Pass => StatusLevel::Pass,
                other => unreachable!("Success only permits Pass or Leak Pass, found {other:?}"),
            },
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => StatusLevel::Retry,
            ExecutionDescription::Failure { .. } => StatusLevel::Fail,
        }
    }

    /// Returns the final status level for this `ExecutionDescription`.
    pub fn final_status_level(&self) -> FinalStatusLevel {
        match self {
            ExecutionDescription::Success { single_status, .. } => {
                // Slow is higher priority than leaky, so return slow first here.
                if single_status.is_slow {
                    FinalStatusLevel::Slow
                } else {
                    match single_status.result {
                        ExecutionResult::Pass => FinalStatusLevel::Pass,
                        ExecutionResult::Leak {
                            result: LeakTimeoutResult::Pass,
                        } => FinalStatusLevel::Leak,
                        other => {
                            unreachable!("Success only permits Pass or Leak Pass, found {other:?}")
                        }
                    }
                }
            }
            // A flaky test implies that we print out retry information for it.
            ExecutionDescription::Flaky { .. } => FinalStatusLevel::Flaky,
            ExecutionDescription::Failure { .. } => FinalStatusLevel::Fail,
        }
    }

    /// Returns the last run status.
    pub fn last_status(&self) -> &'a ExecuteStatus {
        match self {
            // For a single success, the only status is also the last status.
            ExecutionDescription::Success {
                single_status: last_status,
            }
            | ExecutionDescription::Flaky { last_status, .. }
            | ExecutionDescription::Failure { last_status, .. } => last_status,
        }
    }
}
960
/// Information about a single execution of a test.
#[derive(Clone, Debug)]
pub struct ExecuteStatus {
    /// Retry-related data.
    pub retry_data: RetryData,
    /// The stdout and stderr output for this test.
    pub output: ChildExecutionOutput,
    /// The execution result for this test: pass, fail or execution error.
    pub result: ExecutionResult,
    /// The time at which the test started.
    pub start_time: DateTime<FixedOffset>,
    /// The time it took for the test to run.
    pub time_taken: Duration,
    /// Whether this test counts as slow.
    pub is_slow: bool,
    /// The delay will be non-zero if this is a retry and delay was specified.
    pub delay_before_start: Duration,
}
979
/// Information about the execution of a setup script.
#[derive(Clone, Debug)]
pub struct SetupScriptExecuteStatus {
    /// Output for this setup script.
    pub output: ChildExecutionOutput,

    /// The execution result for this setup script: pass, fail or execution error.
    pub result: ExecutionResult,

    /// The time at which the script started.
    pub start_time: DateTime<FixedOffset>,

    /// The time it took for the script to run.
    pub time_taken: Duration,

    /// Whether this script counts as slow.
    pub is_slow: bool,

    /// The map of environment variables that were set by this script.
    ///
    /// `None` if an error occurred while running the script or reading the
    /// environment map.
    pub env_map: Option<SetupScriptEnvMap>,
}
1004
/// A map of environment variables set by a setup script.
///
/// Part of [`SetupScriptExecuteStatus`].
#[derive(Clone, Debug)]
pub struct SetupScriptEnvMap {
    /// The map of environment variables set by the script, keyed by variable
    /// name.
    pub env_map: BTreeMap<String, String>,
}
1013
/// Data related to retries for a test.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct RetryData {
    /// The current attempt. In the range `[1, total_attempts]`.
    pub attempt: u32,

    /// The total number of times this test can be run. Equal to `1 + retries`.
    pub total_attempts: u32,
}

impl RetryData {
    /// Returns true if there are no more attempts after this.
    pub fn is_last_attempt(&self) -> bool {
        // The attempt counter is 1-based, so once it reaches (or exceeds) the
        // total, no further attempts remain.
        self.total_attempts <= self.attempt
    }
}
1030
/// Whether a test passed, failed or an error occurred while executing the test.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ExecutionResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles. This usually indicates that
    /// a subprocess that inherit standard IO was created, but it didn't shut down when
    /// the test failed.
    Leak {
        /// Whether this leak was treated as a failure.
        ///
        /// Note the difference between `Fail { leaked: true, .. }` and
        /// `Leak { result: LeakTimeoutResult::Fail }`. In the former case,
        /// the test failed and also leaked handles. In the latter case, the
        /// test passed but leaked handles, and configuration indicated that
        /// this is a failure.
        result: LeakTimeoutResult,
    },
    /// The test failed.
    Fail {
        /// The abort status of the test, if any (for example, the signal on Unix).
        failure_status: FailureStatus,

        /// Whether a test leaked handles. If set to true, this usually indicates that
        /// a subprocess that inherit standard IO was created, but it didn't shut down when
        /// the test failed.
        leaked: bool,
    },
    /// An error occurred while executing the test.
    ExecFail,
    /// The test was terminated due to a timeout.
    Timeout {
        /// Whether this timeout was treated as a failure.
        result: SlowTimeoutResult,
    },
}
1066
1067impl ExecutionResult {
1068 /// Returns true if the test was successful.
1069 pub fn is_success(self) -> bool {
1070 match self {
1071 ExecutionResult::Pass
1072 | ExecutionResult::Timeout {
1073 result: SlowTimeoutResult::Pass,
1074 }
1075 | ExecutionResult::Leak {
1076 result: LeakTimeoutResult::Pass,
1077 } => true,
1078 ExecutionResult::Leak {
1079 result: LeakTimeoutResult::Fail,
1080 }
1081 | ExecutionResult::Fail { .. }
1082 | ExecutionResult::ExecFail
1083 | ExecutionResult::Timeout {
1084 result: SlowTimeoutResult::Fail,
1085 } => false,
1086 }
1087 }
1088
1089 /// Returns true if this result represents a test that was terminated by nextest
1090 /// (as opposed to failing naturally).
1091 ///
1092 /// This is used to suppress output spam when immediate termination is active.
1093 ///
1094 /// TODO: This is a heuristic that checks if the test was terminated by SIGTERM (Unix) or
1095 /// job object (Windows). In an edge case, a test could send SIGTERM to itself, which would
1096 /// incorrectly be detected as a nextest-initiated termination. A more robust solution would
1097 /// track which tests were explicitly sent termination signals by nextest.
1098 pub fn is_termination_failure(&self) -> bool {
1099 match self {
1100 #[cfg(unix)]
1101 ExecutionResult::Fail {
1102 failure_status: FailureStatus::Abort(AbortStatus::UnixSignal(libc::SIGTERM)),
1103 ..
1104 } => true,
1105 #[cfg(windows)]
1106 ExecutionResult::Fail {
1107 failure_status: FailureStatus::Abort(AbortStatus::JobObject),
1108 ..
1109 } => true,
1110 _ => false,
1111 }
1112 }
1113
1114 /// Returns a static string representation of the result.
1115 pub fn as_static_str(&self) -> &'static str {
1116 match self {
1117 ExecutionResult::Pass => "pass",
1118 ExecutionResult::Leak { .. } => "leak",
1119 ExecutionResult::Fail { .. } => "fail",
1120 ExecutionResult::ExecFail => "exec-fail",
1121 ExecutionResult::Timeout { .. } => "timeout",
1122 }
1123 }
1124}
1125
/// Failure status: either an exit code or an abort status.
///
/// Part of [`ExecutionResult::Fail`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FailureStatus {
    /// The test exited with a non-zero exit code.
    ExitCode(i32),

    /// The test aborted (for example, due to a signal on Unix).
    Abort(AbortStatus),
}
1135
1136impl FailureStatus {
1137 /// Extract the failure status from an `ExitStatus`.
1138 pub fn extract(exit_status: ExitStatus) -> Self {
1139 if let Some(abort_status) = AbortStatus::extract(exit_status) {
1140 FailureStatus::Abort(abort_status)
1141 } else {
1142 FailureStatus::ExitCode(
1143 exit_status
1144 .code()
1145 .expect("if abort_status is None, then code must be present"),
1146 )
1147 }
1148 }
1149}
1150
/// A regular exit code or Windows NT abort status for a test.
///
/// Returned as part of the [`ExecutionResult::Fail`] variant.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum AbortStatus {
    /// The test was aborted due to a signal on Unix.
    #[cfg(unix)]
    UnixSignal(i32),

    /// The test was determined to have aborted because the high bit was set on Windows.
    #[cfg(windows)]
    WindowsNtStatus(windows_sys::Win32::Foundation::NTSTATUS),

    /// The test was terminated via job object on Windows.
    #[cfg(windows)]
    JobObject,
}

impl AbortStatus {
    /// Extract the abort status from an [`ExitStatus`].
    ///
    /// Returns `None` if the process exited normally (no abort could be
    /// detected from the status).
    pub fn extract(exit_status: ExitStatus) -> Option<Self> {
        #[cfg(unix)]
        {
            // On Unix, extract the signal if it's found.
            use std::os::unix::process::ExitStatusExt;
            exit_status.signal().map(AbortStatus::UnixSignal)
        }
        #[cfg(windows)]
        {
            // On Windows, a negative exit code is interpreted as an NTSTATUS.
            exit_status
                .code()
                .and_then(|code| (code < 0).then_some(AbortStatus::WindowsNtStatus(code)))
        }
        #[cfg(not(any(unix, windows)))]
        {
            // Other platforms have no notion of an abort status.
            let _ = exit_status;
            None
        }
    }
}

impl fmt::Debug for AbortStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            #[cfg(unix)]
            Self::UnixSignal(signal) => write!(f, "UnixSignal({signal})"),
            #[cfg(windows)]
            Self::WindowsNtStatus(status) => write!(f, "WindowsNtStatus({status:x})"),
            #[cfg(windows)]
            Self::JobObject => f.write_str("JobObject"),
        }
    }
}
1200
// Note: the order here matters -- it indicates severity of cancellation. The
// derived `Ord`/`PartialOrd` use declaration order, so later variants compare
// greater (more severe) than earlier ones.
/// The reason why a test run is being cancelled.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(test, derive(test_strategy::Arbitrary))]
pub enum CancelReason {
    /// A setup script failed.
    SetupScriptFailure,

    /// A test failed and --no-fail-fast wasn't specified.
    TestFailure,

    /// An error occurred while reporting results.
    ReportError,

    /// The global timeout was exceeded.
    GlobalTimeout,

    /// A test failed and fail-fast with immediate termination was specified.
    TestFailureImmediate,

    /// A termination signal (on Unix, SIGTERM or SIGHUP) was received.
    Signal,

    /// An interrupt (on Unix, Ctrl-C) was received.
    Interrupt,

    /// A second signal was received, and the run is being forcibly killed.
    SecondSignal,
}
1230
1231impl CancelReason {
1232 pub(crate) fn to_static_str(self) -> &'static str {
1233 match self {
1234 CancelReason::SetupScriptFailure => "setup script failure",
1235 CancelReason::TestFailure => "test failure",
1236 CancelReason::ReportError => "reporting error",
1237 CancelReason::GlobalTimeout => "global timeout",
1238 CancelReason::TestFailureImmediate => "test failure",
1239 CancelReason::Signal => "signal",
1240 CancelReason::Interrupt => "interrupt",
1241 CancelReason::SecondSignal => "second signal",
1242 }
1243 }
1244}
/// The kind of unit of work that nextest is executing.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitKind {
    /// A test.
    Test,

    /// A script (e.g. a setup script).
    Script,
}

impl UnitKind {
    pub(crate) const WAITING_ON_TEST_MESSAGE: &str = "waiting on test process";
    pub(crate) const WAITING_ON_SCRIPT_MESSAGE: &str = "waiting on script process";

    pub(crate) const EXECUTING_TEST_MESSAGE: &str = "executing test";
    pub(crate) const EXECUTING_SCRIPT_MESSAGE: &str = "executing script";

    /// Returns the "waiting on ..." message for this kind of unit.
    pub(crate) fn waiting_on_message(&self) -> &'static str {
        match self {
            Self::Script => Self::WAITING_ON_SCRIPT_MESSAGE,
            Self::Test => Self::WAITING_ON_TEST_MESSAGE,
        }
    }

    /// Returns the "executing ..." message for this kind of unit.
    pub(crate) fn executing_message(&self) -> &'static str {
        match self {
            Self::Script => Self::EXECUTING_SCRIPT_MESSAGE,
            Self::Test => Self::EXECUTING_TEST_MESSAGE,
        }
    }
}

impl fmt::Display for UnitKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            Self::Test => "test",
            Self::Script => "script",
        };
        f.write_str(text)
    }
}
1285
/// A response to an information request.
///
/// One variant per kind of unit that can respond: setup scripts and tests.
#[derive(Clone, Debug)]
pub enum InfoResponse<'a> {
    /// A setup script's response.
    SetupScript(SetupScriptInfoResponse<'a>),

    /// A test's response.
    Test(TestInfoResponse<'a>),
}
1295
/// A setup script's response to an information request.
#[derive(Clone, Debug)]
pub struct SetupScriptInfoResponse<'a> {
    /// The stress index of the setup script.
    ///
    /// NOTE(review): presumably `None` when the run is not a stress run --
    /// confirm with the producer of this event.
    pub stress_index: Option<StressIndex>,

    /// The identifier of the setup script instance.
    pub script_id: ScriptId,

    /// The program to run.
    pub program: String,

    /// The list of arguments to the program.
    pub args: &'a [String],

    /// The state of the setup script.
    pub state: UnitState,

    /// Output obtained from the setup script.
    pub output: ChildExecutionOutput,
}
1317
/// A test's response to an information request.
#[derive(Clone, Debug)]
pub struct TestInfoResponse<'a> {
    /// The stress index of the test.
    ///
    /// NOTE(review): presumably `None` when the run is not a stress run --
    /// confirm with the producer of this event.
    pub stress_index: Option<StressIndex>,

    /// The test instance that the information is about.
    pub test_instance: TestInstanceId<'a>,

    /// Information about retries.
    pub retry_data: RetryData,

    /// The state of the test.
    pub state: UnitState,

    /// Output obtained from the test.
    pub output: ChildExecutionOutput,
}
1336
/// The current state of a test or script process: running, exiting, or
/// terminating.
///
/// Part of information response requests.
#[derive(Clone, Debug)]
pub enum UnitState {
    /// The unit is currently running.
    Running {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit has been running.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// The unit has finished running, and is currently in the process of
    /// exiting.
    Exiting {
        /// The process ID.
        pid: u32,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,

        /// The tentative execution result before leaked status is determined.
        ///
        /// `None` means that the exit status could not be read, and should be
        /// treated as a failure.
        tentative_result: Option<ExecutionResult>,

        /// How long has been spent waiting for the process to exit.
        waiting_duration: Duration,

        /// How much longer nextest will wait until the unit is marked leaky.
        remaining: Duration,
    },

    /// The child process is being terminated by nextest.
    Terminating(UnitTerminatingState),

    /// The unit has finished running and the process has exited.
    Exited {
        /// The result of executing the unit.
        result: ExecutionResult,

        /// The amount of time the unit ran for.
        time_taken: Duration,

        /// `Some` if the unit is marked as slow, along with the duration after
        /// which it was marked as slow.
        slow_after: Option<Duration>,
    },

    /// A delay is being waited out before the next attempt of the test is
    /// started. (Only relevant for tests.)
    DelayBeforeNextAttempt {
        /// The previous execution result.
        previous_result: ExecutionResult,

        /// Whether the previous attempt was marked as slow.
        previous_slow: bool,

        /// How long has been spent waiting so far.
        waiting_duration: Duration,

        /// How much longer nextest will wait until retrying the test.
        remaining: Duration,
    },
}
1414
1415impl UnitState {
1416 /// Returns true if the state has a valid output attached to it.
1417 pub fn has_valid_output(&self) -> bool {
1418 match self {
1419 UnitState::Running { .. }
1420 | UnitState::Exiting { .. }
1421 | UnitState::Terminating(_)
1422 | UnitState::Exited { .. } => true,
1423 UnitState::DelayBeforeNextAttempt { .. } => false,
1424 }
1425 }
1426}
1427
/// The current terminating state of a test or script process.
///
/// Part of [`UnitState::Terminating`].
#[derive(Clone, Debug)]
pub struct UnitTerminatingState {
    /// The process ID.
    pub pid: u32,

    /// The amount of time the unit ran for.
    pub time_taken: Duration,

    /// The reason for the termination.
    pub reason: UnitTerminateReason,

    /// The method by which the process is being terminated.
    pub method: UnitTerminateMethod,

    /// How long has been spent waiting for the process to exit.
    pub waiting_duration: Duration,

    /// How much longer nextest will wait until a kill command is sent to the process.
    pub remaining: Duration,
}
1451
/// The reason for a script or test being forcibly terminated by nextest.
///
/// Part of information response requests.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateReason {
    /// The unit is being terminated due to a test timeout being hit.
    Timeout,

    /// The unit is being terminated due to nextest receiving a signal.
    Signal,

    /// The unit is being terminated due to an interrupt (i.e. Ctrl-C).
    Interrupt,
}

impl fmt::Display for UnitTerminateReason {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            Self::Timeout => "timeout",
            Self::Signal => "signal",
            Self::Interrupt => "interrupt",
        };
        f.write_str(text)
    }
}
1476
/// The way in which a script or test is being forcibly terminated by nextest.
#[derive(Clone, Copy, Debug)]
pub enum UnitTerminateMethod {
    /// The unit is being terminated by sending a signal.
    #[cfg(unix)]
    Signal(UnitTerminateSignal),

    /// The unit is being terminated by terminating the Windows job object.
    #[cfg(windows)]
    JobObject,

    /// The unit is being waited on to exit. A termination signal will be sent
    /// if it doesn't exit within the grace period.
    ///
    /// On Windows, this occurs when nextest receives Ctrl-C. In that case, it
    /// is assumed that tests will also receive Ctrl-C and exit on their own. If
    /// tests do not exit within the grace period configured for them, their
    /// corresponding job objects will be terminated.
    #[cfg(windows)]
    Wait,

    /// A fake method used for testing.
    #[cfg(test)]
    Fake,
}
1502
/// The signal that is or was sent to terminate a script or test.
#[cfg(unix)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UnitTerminateSignal {
    /// The unit is being terminated by sending a SIGINT.
    Interrupt,

    /// The unit is being terminated by sending a SIGTERM signal.
    Term,

    /// The unit is being terminated by sending a SIGHUP signal.
    Hangup,

    /// The unit is being terminated by sending a SIGQUIT signal.
    Quit,

    /// The unit is being terminated by sending a SIGKILL signal.
    Kill,
}

#[cfg(unix)]
impl fmt::Display for UnitTerminateSignal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::Interrupt => "SIGINT",
            Self::Term => "SIGTERM",
            Self::Hangup => "SIGHUP",
            Self::Quit => "SIGQUIT",
            Self::Kill => "SIGKILL",
        })
    }
}
1535
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks `RunStats::summarize_final` against representative aggregate
    /// counts: empty runs, complete runs, cancelled runs, the various test
    /// failure kinds, and setup-script outcomes.
    //
    // (Renamed from `test_is_success`: this test exercises `summarize_final`,
    // not `ExecutionResult::is_success`.)
    #[test]
    fn test_summarize_final() {
        assert_eq!(
            RunStats::default().summarize_final(),
            FinalRunStats::NoTestsRun,
            "empty run => no tests run"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "initial run count = final run count => success"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 41,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::Test {
                    initial_run_count: 42,
                    not_run: 1
                }
            },
            "initial run count > final run count => cancelled"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "failed => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "exec failed => failure"
        );
        // (This assertion previously repeated the actual and expected values
        // as extra format arguments; assert_eq! already prints both sides on
        // failure, so the duplication has been removed.)
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                failed_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::Test {
                initial_run_count: 42,
                not_run: 0
            }),
            "timed out => failure"
        );
        assert_eq!(
            RunStats {
                initial_run_count: 42,
                finished_count: 42,
                skipped: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Success,
            "skipped => not considered a failure"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Cancelled {
                reason: None,
                kind: RunStatsFailureKind::SetupScript,
            },
            // Message fixed: this case is about scripts not all finishing
            // (cancellation), not about a script failing.
            "setup scripts not all finished => cancelled"
        );

        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_exec_failed: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script exec failed => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_timed_out: 1,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::Failed(RunStatsFailureKind::SetupScript),
            "setup script timed out => failure"
        );
        assert_eq!(
            RunStats {
                setup_scripts_initial_count: 2,
                setup_scripts_finished_count: 2,
                setup_scripts_passed: 2,
                ..RunStats::default()
            }
            .summarize_final(),
            FinalRunStats::NoTestsRun,
            "setup scripts passed => success, but no tests run"
        );
    }
}