// fixture_data/models.rs

1// Copyright (c) The nextest Contributors
2// SPDX-License-Identifier: MIT OR Apache-2.0
3
4//! Data models for fixture information.
5
6use iddqd::{IdOrdItem, IdOrdMap, id_upcast};
7use nextest_metadata::{BuildPlatform, FilterMatch, RustBinaryId, TestCaseName};
8
/// The expected result for a test execution, including both the outcome and the
/// expected rerun behavior.
///
/// Produced by [`TestCaseFixture::expected_result`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ExpectedTestResult {
    /// The expected outcome.
    pub result: CheckResult,
    /// The expected rerun behavior.
    pub expected_reruns: ExpectedReruns,
}
18
/// The expected outcome of a test execution.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CheckResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked subprocess handles (not a failure).
    Leak,
    /// The test was marked failed due to leaked handles (a failure).
    LeakFail,
    /// The test failed.
    Fail,
    /// The test is flaky and counts as a run failure.
    FlakyFail,
    /// The test is a flaky-fail (counts as a run failure), but is configured
    /// with `junit.flaky-fail-status = "success"` so it appears as a success
    /// in JUnit XML output.
    FlakyFailJunitSuccess,
    /// The test failed and also leaked handles.
    FailLeak,
    /// The test aborted (e.g. a segfault maps to this outcome).
    Abort,
    /// The test timed out.
    Timeout,
}
35
36impl CheckResult {
37    /// Returns true if this result represents a test failure of any kind.
38    ///
39    /// `Leak` is not a failure: the test passed but leaked subprocess handles.
40    /// `LeakFail` is a failure: the test was marked as failed due to leaked
41    /// handles.
42    pub fn is_failure(self) -> bool {
43        match self {
44            CheckResult::Pass | CheckResult::Leak => false,
45            CheckResult::LeakFail
46            | CheckResult::Fail
47            | CheckResult::FlakyFail
48            | CheckResult::FlakyFailJunitSuccess
49            | CheckResult::FailLeak
50            | CheckResult::Abort
51            | CheckResult::Timeout => true,
52        }
53    }
54
55    /// Converts this result to its terminal representation.
56    ///
57    /// Terminal output cannot distinguish between `FlakyFail` and
58    /// `FlakyFailJunitSuccess` — both display as `FLKY-FL`.
59    pub fn to_terminal(self) -> TerminalCheckResult {
60        match self {
61            CheckResult::Pass => TerminalCheckResult::Pass,
62            CheckResult::Leak => TerminalCheckResult::Leak,
63            CheckResult::LeakFail => TerminalCheckResult::LeakFail,
64            CheckResult::Fail => TerminalCheckResult::Fail,
65            CheckResult::FlakyFail | CheckResult::FlakyFailJunitSuccess => {
66                TerminalCheckResult::FlakyFail
67            }
68            CheckResult::FailLeak => TerminalCheckResult::FailLeak,
69            CheckResult::Abort => TerminalCheckResult::Abort,
70            CheckResult::Timeout => TerminalCheckResult::Timeout,
71        }
72    }
73}
74
/// The result of a test as it appears in terminal output.
///
/// This is separate from [`CheckResult`] because some model-level distinctions
/// (e.g., `FlakyFailJunitSuccess` vs `FlakyFail`) are invisible in terminal
/// output.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TerminalCheckResult {
    /// The test passed.
    Pass,
    /// The test passed but leaked handles.
    Leak,
    /// The test failed due to leaked handles.
    LeakFail,
    /// The test failed.
    Fail,
    /// The test was flaky and counts as a failure (displayed as `FLKY-FL`).
    FlakyFail,
    /// The test failed and also leaked handles.
    FailLeak,
    /// The test aborted.
    Abort,
    /// The test timed out.
    Timeout,
}
91
/// What rerun behavior to expect for a test case.
///
/// Returned as part of [`ExpectedTestResult`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ExpectedReruns {
    /// No reruns expected (no retries configured, or test doesn't retry).
    None,
    /// Exactly N flaky runs expected (test passed on attempt N+1).
    FlakyRunCount(usize),
    /// Some reruns expected but the exact count is unknown (failing test with
    /// retries, where the count depends on per-test profile overrides that the
    /// fixture data model doesn't track).
    SomeReruns,
}
104
bitflags::bitflags! {
    /// Properties that control which tests should be run in integration test invocations.
    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
    pub struct RunProperties: u64 {
        /// The workspace has been relocated; tests requiring the original CWD fail.
        const RELOCATED = 0x1;
        /// Run with the default filter; tests not in the default set are skipped.
        const WITH_DEFAULT_FILTER = 0x2;
        // --skip cdylib
        const WITH_SKIP_CDYLIB_FILTER = 0x4;
        // --exact test_multiply_two tests::test_multiply_two_cdylib
        const WITH_MULTIPLY_TWO_EXACT_FILTER = 0x8;
        /// Only `tests::test_multiply_two_cdylib` is expected to run.
        const CDYLIB_EXAMPLE_PACKAGE_FILTER = 0x10;
        /// NOTE(review): not consumed in this file — presumably skips the
        /// summary check in the integration test harness; confirm at call site.
        const SKIP_SUMMARY_CHECK = 0x20;
        /// No binaries are expected, so every test case is skipped.
        const EXPECT_NO_BINARIES = 0x40;
        /// NOTE(review): not consumed in this file — presumably enables
        /// benchmark runs; confirm at call site.
        const BENCHMARKS = 0x80;
        /// Run ignored benchmarks with the `with-bench-override` profile.
        const BENCH_OVERRIDE_TIMEOUT = 0x100;
        /// Run ignored benchmarks with the `with-bench-termination` profile.
        const BENCH_TERMINATION = 0x200;
        /// Run benchmarks with the `with-test-termination-only` profile.
        const BENCH_IGNORES_TEST_TIMEOUT = 0x400;
        /// Run ignored tests only (--run-ignored only), excluding slow timeout tests.
        const RUN_IGNORED_ONLY = 0x800;
        /// Run with with-timeout-retries-success profile, slow_timeout tests only.
        /// These tests time out but pass due to on-timeout=pass.
        const TIMEOUT_RETRIES_PASS = 0x1000;
        /// Run with with-timeout-retries-success profile, flaky slow timeout test only.
        /// This test fails twice then times out (passes) on the 3rd attempt.
        const TIMEOUT_RETRIES_FLAKY = 0x2000;
        /// Run with the with-retries profile. Flaky tests should pass after retries.
        const WITH_RETRIES = 0x4000;
        /// Run with a target runner set. On Unix, segfaults are reported as regular
        /// failures because the passthrough runner doesn't propagate signal info.
        const WITH_TARGET_RUNNER = 0x8000;
        /// Run with the with-termination profile. Tests should time out.
        const WITH_TERMINATION = 0x10000;
        /// Run with the with-timeout-success profile. test_slow_timeout passes
        /// (on-timeout = "pass"), others fail.
        const WITH_TIMEOUT_SUCCESS = 0x20000;
        /// Allow skipped test names to appear in output (e.g., for replay which shows SKIP lines).
        /// Without this flag, verification fails if any skipped test name appears in the output.
        const ALLOW_SKIPPED_NAMES_IN_OUTPUT = 0x40000;
        /// Run with the with-retries-flaky-fail profile. Flaky tests with
        /// `flaky-result = "fail"` should count as failures.
        const WITH_RETRIES_FLAKY_FAIL = 0x80000;
        /// Run with `--flaky-result fail` CLI flag. All flaky tests should
        /// count as failures, regardless of per-test config.
        const WITH_CLI_FLAKY_RESULT_FAIL = 0x100000;
        /// Run with `--flaky-result pass` CLI flag. No flaky tests should
        /// count as failures, even if config has `flaky-result = "fail"`.
        const WITH_CLI_FLAKY_RESULT_PASS = 0x200000;
    }
}
157
/// Fixture information for a single test binary (suite) and its expected
/// test cases.
#[derive(Clone, Debug)]
pub struct TestSuiteFixture {
    /// The binary ID for this suite; also serves as the map key.
    pub binary_id: RustBinaryId,
    /// The binary's name.
    pub binary_name: &'static str,
    /// The build platform for the binary.
    pub build_platform: BuildPlatform,
    /// Expected test cases, keyed by test name.
    pub test_cases: IdOrdMap<TestCaseFixture>,
    // Private: set via `with_property`, queried via `has_property`.
    properties: TestSuiteFixtureProperties,
}
166
impl IdOrdItem for TestSuiteFixture {
    // Suites are keyed by binary ID within an `IdOrdMap`.
    type Key<'a> = &'a RustBinaryId;
    fn key(&self) -> Self::Key<'_> {
        &self.binary_id
    }
    id_upcast!();
}
174
impl TestSuiteFixture {
    /// Creates a new test suite fixture with no properties set.
    pub fn new(
        binary_id: &'static str,
        binary_name: &'static str,
        build_platform: BuildPlatform,
        test_cases: IdOrdMap<TestCaseFixture>,
    ) -> Self {
        Self {
            binary_id: binary_id.into(),
            binary_name,
            build_platform,
            test_cases,
            properties: TestSuiteFixtureProperties::empty(),
        }
    }

    /// Adds a property to this fixture (builder-style; returns `self`).
    pub fn with_property(mut self, property: TestSuiteFixtureProperties) -> Self {
        self.properties |= property;
        self
    }

    /// Returns true if all flags in `property` are set on this fixture.
    pub fn has_property(&self, property: TestSuiteFixtureProperties) -> bool {
        self.properties.contains(property)
    }

    /// Asserts that `other` (observed test names with their filter-match
    /// results) lines up with this suite's expected test cases.
    ///
    /// Panics with a diagnostic message on any mismatch: differing counts,
    /// an unexpected name, or an ignored/filter-match inconsistency.
    pub fn assert_test_cases_match(&self, other: &IdOrdMap<TestNameAndFilterMatch<'_>>) {
        if self.test_cases.len() != other.len() {
            panic!(
                "test cases mismatch: expected {} test cases, found {}; \
                 expected: {self:#?}, actual: {other:#?}",
                self.test_cases.len(),
                other.len(),
            );
        }

        for name_and_filter_match in other {
            if let Some(test_case) = self.test_cases.get(name_and_filter_match.name) {
                // Ignored tests must *not* match the filter and non-ignored
                // tests must match it — so equality here indicates a mismatch.
                if test_case.status.is_ignored() == name_and_filter_match.filter_match.is_match() {
                    panic!(
                        "test case status mismatch for '{}': expected {:?}, found {:?}; \
                         expected: {self:#?}, actual: {other:#?}",
                        name_and_filter_match.name,
                        test_case.status,
                        name_and_filter_match.filter_match,
                    );
                }
            } else {
                panic!(
                    "test case '{}' not found in test suite '{}'; \
                     expected: {self:#?}, actual: {other:#?}",
                    name_and_filter_match.name, self.binary_name,
                );
            }
        }
    }
}
231
bitflags::bitflags! {
    /// Properties of a test suite fixture.
    ///
    /// NOTE(review): these flags are declared but not consumed within this
    /// file — presumably queried via `has_property` by the integration tests;
    /// confirm at call sites.
    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
    pub struct TestSuiteFixtureProperties: u64 {
        /// The suite is not part of the default test set.
        const NOT_IN_DEFAULT_SET = 0x1;
        /// The suite matches the cdylib-example package filter.
        const MATCHES_CDYLIB_EXAMPLE = 0x2;
    }
}
239
/// Fixture information for a single test case: its name, expected status,
/// and properties that influence filtering and expected results.
#[derive(Clone, Debug)]
pub struct TestCaseFixture {
    /// The test case's name; also serves as the map key.
    pub name: TestCaseName,
    /// The expected status for this test.
    pub status: TestCaseFixtureStatus,
    // Private: set via `with_property`, queried via `has_property`.
    properties: TestCaseFixtureProperties,
}
246
impl IdOrdItem for TestCaseFixture {
    // Test cases are keyed by name within an `IdOrdMap`.
    type Key<'a> = &'a TestCaseName;
    fn key(&self) -> Self::Key<'_> {
        &self.name
    }
    id_upcast!();
}
254
impl TestCaseFixture {
    /// Creates a new test case fixture with no properties set.
    pub fn new(name: &str, status: TestCaseFixtureStatus) -> Self {
        Self {
            name: TestCaseName::new(name),
            status,
            properties: TestCaseFixtureProperties::empty(),
        }
    }

    /// Adds a property to this fixture (builder-style; returns `self`).
    pub fn with_property(mut self, property: TestCaseFixtureProperties) -> Self {
        self.properties |= property;
        self
    }

    /// Returns true if all flags in `property` are set on this fixture.
    pub fn has_property(&self, property: TestCaseFixtureProperties) -> bool {
        self.properties.contains(property)
    }

    /// Determines if this test should be skipped based on run properties and filters.
    ///
    /// The checks are order-sensitive: filter-based exclusions come first,
    /// then run modes that whitelist a specific subset of tests (these
    /// `return` directly, short-circuiting later checks), and finally the
    /// default rule that ignored tests are skipped.
    pub fn should_skip(&self, properties: RunProperties) -> bool {
        // NotInDefaultSet filter.
        if self.has_property(TestCaseFixtureProperties::NOT_IN_DEFAULT_SET)
            && properties.contains(RunProperties::WITH_DEFAULT_FILTER)
        {
            return true;
        }

        // NotInDefaultSetUnix filter (Unix-specific).
        if cfg!(unix)
            && self.has_property(TestCaseFixtureProperties::NOT_IN_DEFAULT_SET_UNIX)
            && properties.contains(RunProperties::WITH_DEFAULT_FILTER)
        {
            return true;
        }

        // MatchesCdylib + WithSkipCdylibFilter.
        if self.has_property(TestCaseFixtureProperties::MATCHES_CDYLIB)
            && properties.contains(RunProperties::WITH_SKIP_CDYLIB_FILTER)
        {
            return true;
        }

        // WithMultiplyTwoExactFilter - skip tests that don't match.
        if !self.has_property(TestCaseFixtureProperties::MATCHES_TEST_MULTIPLY_TWO)
            && properties.contains(RunProperties::WITH_MULTIPLY_TWO_EXACT_FILTER)
        {
            return true;
        }

        // CdyLibExamplePackageFilter - only run test_multiply_two_cdylib.
        if properties.contains(RunProperties::CDYLIB_EXAMPLE_PACKAGE_FILTER)
            && self.name != TestCaseName::new("tests::test_multiply_two_cdylib")
        {
            return true;
        }

        // ExpectNoBinaries - all tests should be skipped.
        if properties.contains(RunProperties::EXPECT_NO_BINARIES) {
            return true;
        }

        // BenchOverrideTimeout - only run the specific benchmark that times out.
        if properties.contains(RunProperties::BENCH_OVERRIDE_TIMEOUT) {
            return !self.has_property(TestCaseFixtureProperties::BENCH_OVERRIDE_TIMEOUT);
        }

        // BenchTermination - only run the specific benchmark that times out.
        if properties.contains(RunProperties::BENCH_TERMINATION) {
            return !self.has_property(TestCaseFixtureProperties::BENCH_TERMINATION);
        }

        // BenchIgnoresTestTimeout - only run the specific benchmark that passes.
        if properties.contains(RunProperties::BENCH_IGNORES_TEST_TIMEOUT) {
            return !self.has_property(TestCaseFixtureProperties::BENCH_IGNORES_TEST_TIMEOUT);
        }

        // TIMEOUT_RETRIES_PASS - only run tests with the
        // TEST_SLOW_TIMEOUT_SUBSTRING property (not benchmarks). These are the
        // test_slow_timeout* tests that time out but pass.
        if properties.contains(RunProperties::TIMEOUT_RETRIES_PASS) {
            // Skip if not SLOW_TIMEOUT or if it's a benchmark.
            return !self.has_property(TestCaseFixtureProperties::TEST_SLOW_TIMEOUT_SUBSTRING)
                || self.has_property(TestCaseFixtureProperties::IS_BENCHMARK);
        }

        // TIMEOUT_RETRIES_FLAKY - only run the flaky slow timeout test.
        if properties.contains(RunProperties::TIMEOUT_RETRIES_FLAKY) {
            return !self.has_property(TestCaseFixtureProperties::FLAKY_SLOW_TIMEOUT_SUBSTRING);
        }

        // WITH_TERMINATION - only run test_slow_timeout* tests (they time out).
        if properties.contains(RunProperties::WITH_TERMINATION) {
            return !self.has_property(TestCaseFixtureProperties::TEST_SLOW_TIMEOUT_SUBSTRING)
                || self.has_property(TestCaseFixtureProperties::IS_BENCHMARK);
        }

        // WITH_TIMEOUT_SUCCESS - only run test_slow_timeout* tests.
        if properties.contains(RunProperties::WITH_TIMEOUT_SUCCESS) {
            return !self.has_property(TestCaseFixtureProperties::TEST_SLOW_TIMEOUT_SUBSTRING)
                || self.has_property(TestCaseFixtureProperties::IS_BENCHMARK);
        }

        // RUN_IGNORED_ONLY: run only ignored tests, excluding slow timeout
        // tests.
        if properties.contains(RunProperties::RUN_IGNORED_ONLY) {
            // Skip slow timeout tests (filtered out in the test).
            if self.has_property(TestCaseFixtureProperties::SLOW_TIMEOUT_SUBSTRING) {
                return true;
            }
            // Skip non-ignored tests.
            if !self.status.is_ignored() {
                return true;
            }
            // Run other ignored tests.
            return false;
        }

        // Ignored tests are skipped by this test suite.
        if self.status.is_ignored() {
            return true;
        }

        false
    }

    /// Determines the expected test result based on test status and run
    /// properties.
    ///
    /// Returns both the expected outcome and the expected rerun behavior.
    pub fn expected_result(&self, properties: RunProperties) -> ExpectedTestResult {
        let result = self.expected_check_result(properties);
        let expected_reruns = self.expected_reruns(result, properties);
        ExpectedTestResult {
            result,
            expected_reruns,
        }
    }

    /// Computes the expected outcome for this test under `properties`.
    ///
    /// Special-cased run modes (benchmark timeouts, slow-timeout profiles)
    /// are checked first, in order; only then does the result fall back to
    /// the fixture's declared status. This assumes `should_skip` already
    /// filtered out tests that won't run under these properties.
    fn expected_check_result(&self, properties: RunProperties) -> CheckResult {
        // BenchOverrideTimeout - the benchmark times out due to override.
        if self.has_property(TestCaseFixtureProperties::BENCH_OVERRIDE_TIMEOUT)
            && properties.contains(RunProperties::BENCH_OVERRIDE_TIMEOUT)
        {
            return CheckResult::Timeout;
        }

        // BenchTermination - the benchmark times out due to bench.slow-timeout.
        if self.has_property(TestCaseFixtureProperties::BENCH_TERMINATION)
            && properties.contains(RunProperties::BENCH_TERMINATION)
        {
            return CheckResult::Timeout;
        }

        // BenchIgnoresTestTimeout - the benchmark passes because it uses
        // bench.slow-timeout (30 years default) instead of slow-timeout.
        if self.has_property(TestCaseFixtureProperties::BENCH_IGNORES_TEST_TIMEOUT)
            && properties.contains(RunProperties::BENCH_IGNORES_TEST_TIMEOUT)
        {
            return CheckResult::Pass;
        }

        // TIMEOUT_RETRIES_PASS - tests time out but pass due to on-timeout=pass.
        // The output shows PASS, not TIMEOUT.
        // (The broader SLOW_TIMEOUT_SUBSTRING is fine here: should_skip only
        // lets TEST_SLOW_TIMEOUT_SUBSTRING non-benchmark tests through in
        // this mode.)
        if self.has_property(TestCaseFixtureProperties::SLOW_TIMEOUT_SUBSTRING)
            && properties.contains(RunProperties::TIMEOUT_RETRIES_PASS)
        {
            return CheckResult::Pass;
        }

        // WITH_TERMINATION - all test_slow_timeout* tests time out.
        if self.has_property(TestCaseFixtureProperties::TEST_SLOW_TIMEOUT_SUBSTRING)
            && properties.contains(RunProperties::WITH_TERMINATION)
        {
            return CheckResult::Timeout;
        }

        // WITH_TIMEOUT_SUCCESS - test_slow_timeout passes (on-timeout = "pass"),
        // while other test_slow_timeout* tests fail.
        if properties.contains(RunProperties::WITH_TIMEOUT_SUCCESS) {
            if self.has_property(TestCaseFixtureProperties::EXACT_TEST_SLOW_TIMEOUT) {
                // test_slow_timeout has on-timeout = "pass" override.
                return CheckResult::Pass;
            }
            if self.has_property(TestCaseFixtureProperties::TEST_SLOW_TIMEOUT_SUBSTRING) {
                // Other test_slow_timeout* tests time out normally.
                return CheckResult::Timeout;
            }
        }

        // No special run mode applies: derive the result from the declared
        // fixture status.
        match self.status {
            TestCaseFixtureStatus::Pass => {
                // NeedsSameCwd tests fail when relocated.
                if self.has_property(TestCaseFixtureProperties::NEEDS_SAME_CWD)
                    && properties.contains(RunProperties::RELOCATED)
                {
                    CheckResult::Fail
                } else {
                    CheckResult::Pass
                }
            }
            TestCaseFixtureStatus::Leak => CheckResult::Leak,
            TestCaseFixtureStatus::LeakFail => CheckResult::LeakFail,
            TestCaseFixtureStatus::Fail => CheckResult::Fail,
            TestCaseFixtureStatus::Flaky { .. } => {
                // CLI --flaky-result overrides all config-level settings.
                if properties.contains(RunProperties::WITH_CLI_FLAKY_RESULT_FAIL) {
                    return CheckResult::FlakyFail;
                }
                if properties.contains(RunProperties::WITH_CLI_FLAKY_RESULT_PASS) {
                    return CheckResult::Pass;
                }
                // With retries and flaky-result = "fail", flaky tests that eventually
                // pass are still counted as failures.
                if properties.contains(RunProperties::WITH_RETRIES_FLAKY_FAIL) {
                    if self.has_property(TestCaseFixtureProperties::FLAKY_RESULT_FAIL_JUNIT_SUCCESS)
                    {
                        return CheckResult::FlakyFailJunitSuccess;
                    } else if self.has_property(TestCaseFixtureProperties::FLAKY_RESULT_FAIL) {
                        return CheckResult::FlakyFail;
                    } else {
                        return CheckResult::Pass;
                    }
                }
                // With retries, flaky tests eventually pass. (Retries are
                // configured in a way which ensures that all tests eventually
                // pass.)
                if properties.contains(RunProperties::WITH_RETRIES) {
                    CheckResult::Pass
                } else {
                    CheckResult::Fail
                }
            }
            TestCaseFixtureStatus::FailLeak => CheckResult::FailLeak,
            TestCaseFixtureStatus::Segfault => {
                // On Unix, segfaults aren't passed through by the passthrough runner.
                // They show as regular failures instead of aborts.
                if cfg!(unix) && properties.contains(RunProperties::WITH_TARGET_RUNNER) {
                    CheckResult::Fail
                } else {
                    CheckResult::Abort
                }
            }
            TestCaseFixtureStatus::IgnoredPass => {
                if properties.contains(RunProperties::RUN_IGNORED_ONLY) {
                    CheckResult::Pass
                } else {
                    unreachable!("ignored tests should be filtered out")
                }
            }
            TestCaseFixtureStatus::IgnoredFail => {
                if properties.contains(RunProperties::RUN_IGNORED_ONLY) {
                    CheckResult::Fail
                } else {
                    unreachable!("ignored tests should be filtered out")
                }
            }
            TestCaseFixtureStatus::IgnoredFlaky { .. } => {
                // TIMEOUT_RETRIES_FLAKY: the test fails several times, then
                // times out and passes due to on-timeout=pass.
                if properties.contains(RunProperties::TIMEOUT_RETRIES_FLAKY) {
                    CheckResult::Pass
                } else if properties.contains(RunProperties::RUN_IGNORED_ONLY) {
                    // Without retries, the first failing attempt is final.
                    CheckResult::Fail
                } else {
                    unreachable!("ignored tests should be filtered out")
                }
            }
        }
    }

    /// Computes the expected rerun behavior for a test case based on its
    /// fixture status, the check result, and the run properties.
    fn expected_reruns(&self, result: CheckResult, properties: RunProperties) -> ExpectedReruns {
        // Flaky tests that eventually pass have a known rerun count.
        // This applies both to flaky-pass (CheckResult::Pass) and
        // flaky-fail (CheckResult::FlakyFail) — either way, the test ran
        // pass_attempt - 1 failing attempts before the passing one.
        if let TestCaseFixtureStatus::Flaky { pass_attempt }
        | TestCaseFixtureStatus::IgnoredFlaky { pass_attempt } = self.status
            && (result == CheckResult::Pass
                || result == CheckResult::FlakyFail
                || result == CheckResult::FlakyFailJunitSuccess)
        {
            debug_assert!(
                pass_attempt >= 2,
                "pass_attempt must be >= 2 for a flaky test"
            );
            return ExpectedReruns::FlakyRunCount((pass_attempt - 1) as usize);
        }

        // Failing tests with retries configured will have reruns, but the exact
        // count depends on per-test profile overrides which the fixture data
        // model doesn't track.
        let has_retries = properties.intersects(
            RunProperties::WITH_RETRIES
                | RunProperties::WITH_RETRIES_FLAKY_FAIL
                | RunProperties::WITH_CLI_FLAKY_RESULT_FAIL
                | RunProperties::WITH_CLI_FLAKY_RESULT_PASS,
        );
        if has_retries && result.is_failure() {
            return ExpectedReruns::SomeReruns;
        }

        ExpectedReruns::None
    }
}
561
/// A test name paired with the filter-match result observed for it, used to
/// compare observed runs against fixture expectations.
#[derive(Clone, Debug)]
pub struct TestNameAndFilterMatch<'a> {
    /// The test case's name (borrowed); also serves as the map key.
    pub name: &'a TestCaseName,
    /// Whether the test matched the filter.
    pub filter_match: FilterMatch,
}
567
impl<'a> IdOrdItem for TestNameAndFilterMatch<'a> {
    // The key borrows from the underlying test name (lifetime 'a), not from
    // &self, which is why the GAT lifetime 'k only appears in the `where`
    // clause.
    type Key<'k>
        = &'a TestCaseName
    where
        Self: 'k;
    fn key(&self) -> Self::Key<'_> {
        self.name
    }
    id_upcast!();
}
578
// This isn't great, but it is the easiest way to compare an IdOrdMap of
// TestFixture with an IdOrdMap of TestNameAndFilterMatch.
impl PartialEq<TestNameAndFilterMatch<'_>> for TestCaseFixture {
    /// Entries are "equal" when the names match and the filter result is
    /// consistent with the ignored status: ignored tests must *not* match the
    /// filter, non-ignored tests must match it (hence `!=`).
    fn eq(&self, other: &TestNameAndFilterMatch<'_>) -> bool {
        self.name == *other.name && self.status.is_ignored() != other.filter_match.is_match()
    }
}
586
/// The declared status of a test case fixture: how the test is expected to
/// behave when run.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum TestCaseFixtureStatus {
    /// The test passes (unless it needs the original CWD and the workspace
    /// was relocated).
    Pass,
    /// The test fails.
    Fail,
    /// The test is flaky: it fails `pass_attempt - 1` times, then passes on
    /// attempt `pass_attempt`.
    Flaky {
        pass_attempt: u32,
    },
    /// The test passes but leaks subprocess handles.
    Leak,
    /// The test is marked failed due to leaked handles.
    LeakFail,
    /// The test fails and also leaks handles.
    FailLeak,
    /// The test aborts with a segfault.
    Segfault,
    /// An ignored test that passes when run with `--run-ignored`.
    IgnoredPass,
    /// An ignored test that fails when run with `--run-ignored`.
    IgnoredFail,
    /// An ignored test that is flaky: it fails `pass_attempt - 1` times, then
    /// passes on attempt `pass_attempt`.
    IgnoredFlaky {
        pass_attempt: u32,
    },
}
606
607impl TestCaseFixtureStatus {
608    pub fn is_ignored(self) -> bool {
609        match self {
610            TestCaseFixtureStatus::IgnoredPass
611            | TestCaseFixtureStatus::IgnoredFail
612            | TestCaseFixtureStatus::IgnoredFlaky { .. } => true,
613            TestCaseFixtureStatus::Pass
614            | TestCaseFixtureStatus::Fail
615            | TestCaseFixtureStatus::Flaky { .. }
616            | TestCaseFixtureStatus::Leak
617            | TestCaseFixtureStatus::LeakFail
618            | TestCaseFixtureStatus::FailLeak
619            | TestCaseFixtureStatus::Segfault => false,
620        }
621    }
622}
623
bitflags::bitflags! {
    /// Properties of a test case fixture, used by `should_skip` and
    /// `expected_check_result` to model filtering and run-mode behavior.
    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
    pub struct TestCaseFixtureProperties: u64 {
        /// Test requires the original working directory; fails when relocated.
        const NEEDS_SAME_CWD = 0x1;
        /// Test is skipped when running with the default filter.
        const NOT_IN_DEFAULT_SET = 0x2;
        /// Test matches the `--skip cdylib` filter.
        const MATCHES_CDYLIB = 0x4;
        /// Test matches the multiply-two `--exact` filter.
        const MATCHES_TEST_MULTIPLY_TWO = 0x8;
        /// Like `NOT_IN_DEFAULT_SET`, but only on Unix.
        const NOT_IN_DEFAULT_SET_UNIX = 0x10;
        /// Test case is a benchmark.
        const IS_BENCHMARK = 0x20;
        /// Benchmark that times out with the with-bench-override profile.
        const BENCH_OVERRIDE_TIMEOUT = 0x40;
        /// Benchmark that times out with the with-bench-termination profile.
        const BENCH_TERMINATION = 0x80;
        /// Benchmark that passes with the with-test-termination-only profile.
        const BENCH_IGNORES_TEST_TIMEOUT = 0x100;
        /// Test with "slow_timeout" as a substring.
        const SLOW_TIMEOUT_SUBSTRING = 0x200;
        /// Test with "test_slow_timeout" as a substring.
        const TEST_SLOW_TIMEOUT_SUBSTRING = 0x400;
        /// Test with "flaky_slow_timeout" as a substring.
        const FLAKY_SLOW_TIMEOUT_SUBSTRING = 0x800;
        /// Exactly test_slow_timeout (not test_slow_timeout_2 or test_slow_timeout_subprocess).
        const EXACT_TEST_SLOW_TIMEOUT = 0x1000;
        /// Flaky test configured with `flaky-result = "fail"`.
        const FLAKY_RESULT_FAIL = 0x2000;
        /// Flaky test configured with `flaky-result = "fail"` and
        /// `junit.flaky-fail-status = "success"`.
        const FLAKY_RESULT_FAIL_JUNIT_SUCCESS = 0x4000;
    }
}