// nextest_runner/reporter/structured/libtest.rs
// Copyright (c) The nextest Contributors
// SPDX-License-Identifier: MIT OR Apache-2.0
//! libtest compatible output support
//!
//! Before 1.70.0 it was possible to send `--format json` to test executables and
//! they would print out a JSON line to stdout for various events. This format
//! was however not intended to be stabilized, so 1.70.0 made it nightly only as
//! intended. However, machine readable output is immensely useful to other
//! tooling that can much more easily consume it than parsing the output meant
//! for humans.
//!
//! Since there already existed tooling using the libtest output format, this
//! event aggregator replicates that format so that projects can seamlessly
//! integrate cargo-nextest into their project, as well as get the benefit of
//! running their tests on stable instead of being forced to use nightly.
//!
//! This implementation will attempt to follow the libtest format as it changes,
//! but the rate of changes is quite low (see <https://github.com/rust-lang/rust/blob/master/library/test/src/formatters/json.rs>)
//! so this should not be a big issue to users, however, if the format is changed,
//! the changes will be replicated in this file with a new minor version allowing
//! users to move to the new format or stick to the format version(s) they were
//! using before
use super::{TestEvent, WriteEventError};
use crate::{
errors::{DisplayErrorChain, FormatVersionError, FormatVersionErrorInner},
list::RustTestSuite,
reporter::TestEventKind,
runner::ExecutionResult,
test_output::{ChildExecutionOutput, ChildOutput, ChildSingleOutput},
};
use bstr::ByteSlice;
use nextest_metadata::MismatchReason;
use std::{collections::BTreeMap, fmt::Write as _};
/// To support pinning the version of the output, we just use this simple enum
/// to document changes as libtest output changes
#[derive(Copy, Clone)]
#[repr(u8)]
enum FormatMinorVersion {
    /// The libtest output as of `rustc 1.75.0-nightly (aa1a71e9e 2023-10-26)` with `--format json --report-time`
    ///
    /// * `{ "type": "suite", "event": "started", "test_count": <u32> }` - Start of a test binary run, always printed
    /// * `{ "type": "test", "event": "started", "name": "<name>" }` - Start of a single test, always printed
    /// * `{ "type": "test", "name": "<name>", "event": "ignored" }` - Printed if a test is ignored
    ///   * Will have an additional `"message" = "<message>"` field if there is a message in the ignore attribute eg. `#[ignore = "not yet implemented"]`
    /// * `{ "type": "test", "name": "<name>", "event": "ok", "exec_time": <f32> }` - Printed if a test runs successfully
    /// * `{ "type": "test", "name": "<name>", "event": "failed", "exec_time": <f32>, "stdout": "<escaped output collected during test execution>" }` - Printed if a test fails, note the stdout field actually contains both stdout and stderr despite the name
    ///   * If `--ensure-time` is passed, libtest will add `"reason": "time limit exceeded"` if the test passes, but exceeds the time limit.
    ///   * If `#[should_panic = "<expected message>"]` is used and message doesn't match, an additional `"message": "panic did not contain expected string\n<panic message>"` field is added
    /// * `{ "type": "suite", "event": "<overall_status>", "passed": <u32>, "failed": <u32>, "ignored": <u32>, "measured": <u32>, "filtered_out": <u32>, "exec_time": <f32> }`
    ///   * `event` will be `"ok"` if no failures occurred, or `"failed"` if `"failed" > 0`
    ///   * `ignored` will be > 0 if there are `#[ignore]` tests and `--ignored` was not passed
    ///   * `filtered_out` will be > 0 if there were tests not marked `#[ignore]` and `--ignored` was passed OR a test filter was passed and 1 or more tests were not executed
    ///   * `measured` is only > 0 if running benchmarks
    First = 1,
    /// Not a real version: exclusive upper bound used when validating a
    /// user-supplied minor version in [`LibtestReporter::new`]
    #[doc(hidden)]
    _Max,
}
/// If libtest output is ever stabilized, this would most likely become the single
/// version and we could get rid of the minor version, but who knows if that
/// will ever happen
#[derive(Copy, Clone)]
#[repr(u8)]
enum FormatMajorVersion {
    /// The libtest output is unstable
    Unstable = 0,
    /// Not a real version: exclusive upper bound used when validating a
    /// user-supplied major version in [`LibtestReporter::new`]
    #[doc(hidden)]
    _Max,
}
/// The accumulated stats for a single test binary
struct LibtestSuite<'cfg> {
    /// The number of tests that failed
    failed: usize,
    /// The number of tests that succeeded
    succeeded: usize,
    /// The number of tests that were ignored
    ignored: usize,
    /// The number of tests that were not executed due to filters
    filtered: usize,
    /// The number of tests in this suite that are still running; the suite
    /// summary is emitted once this drops to zero
    running: usize,
    /// Metadata for the test binary this suite accumulates stats for
    meta: &'cfg RustTestSuite<'cfg>,
    /// The accumulated duration of every test that has been executed
    total: std::time::Duration,
    /// Libtest outputs a `started` event for every test that isn't
    /// filtered, including ignored tests, then outputs `ignored` events after
    /// all the started events, so we just mimic that with a temporary buffer
    ignore_block: Option<bytes::BytesMut>,
    /// The single block of output accumulated for all tests executed in the binary,
    /// this needs to be emitted as a single block to emulate how cargo test works,
    /// executing each test binary serially and outputting a json line for each
    /// event, as otherwise consumers would not be able to associate a single test
    /// with its parent suite
    output_block: bytes::BytesMut,
}
/// Determines whether the `nextest` subobject is added with additional metadata
/// to events
///
/// Passed to [`LibtestReporter::new`] when constructing the reporter
#[derive(Copy, Clone, Debug)]
pub enum EmitNextestObject {
    /// The `nextest` subobject is added
    Yes,
    /// The `nextest` subobject is not added
    No,
}
// JSON `"type"` field values: libtest distinguishes per-test events from
// whole-suite events.
const KIND_TEST: &str = "test";
const KIND_SUITE: &str = "suite";
// JSON `"event"` field values shared by test and suite objects.
const EVENT_STARTED: &str = "started";
const EVENT_IGNORED: &str = "ignored";
const EVENT_OK: &str = "ok";
const EVENT_FAILED: &str = "failed";
/// Converts a formatting error into a [`WriteEventError`].
///
/// Formatting into the in-memory buffers used by this reporter can only fail
/// on allocation failure, so the error is surfaced as an `OutOfMemory` I/O
/// error.
#[inline]
fn fmt_err(err: std::fmt::Error) -> WriteEventError {
    let io_err = std::io::Error::new(std::io::ErrorKind::OutOfMemory, err);
    WriteEventError::Io(io_err)
}
/// A reporter that reports test runs in the same line-by-line JSON format as
/// libtest itself
pub struct LibtestReporter<'cfg> {
    /// The pinned minor format version; currently informational only since
    /// only one minor version exists
    _minor: FormatMinorVersion,
    /// The pinned major format version; currently informational only since
    /// only one major version exists
    _major: FormatMajorVersion,
    /// Per-binary accumulated state, keyed by the binary id
    test_suites: BTreeMap<&'cfg str, LibtestSuite<'cfg>>,
    /// If true, we emit a `nextest` subobject with additional metadata in it
    /// that consumers can use for easier integration if they wish
    emit_nextest_obj: bool,
}
impl<'cfg> LibtestReporter<'cfg> {
/// Creates a new libtest reporter
///
/// The version string is used to allow the reporter to evolve along with
/// libtest, but still be able to output a stable format for consumers. If
/// it is not specified the latest version of the format will be produced.
///
/// If [`EmitNextestObject::Yes`] is passed, an additional `nextest` subobject
/// will be added to some events that includes additional metadata not produced
/// by libtest, but most consumers should still be able to consume them as
/// the base format itself is not changed
pub fn new(
    version: Option<&str>,
    emit_nextest_obj: EmitNextestObject,
) -> Result<Self, FormatVersionError> {
    let emit_nextest_obj = matches!(emit_nextest_obj, EmitNextestObject::Yes);

    // Single construction point shared by the pinned and unpinned paths.
    let build = |major, minor| Self {
        _major: major,
        _minor: minor,
        test_suites: BTreeMap::new(),
        emit_nextest_obj,
    };

    // No explicit pin requested: emit the latest (currently the only) version.
    let Some(version) = version else {
        return Ok(build(FormatMajorVersion::Unstable, FormatMinorVersion::First));
    };

    // A pinned version must be of the form `<major>.<minor>`.
    let (major_str, minor_str) =
        version.split_once('.').ok_or_else(|| FormatVersionError {
            input: version.into(),
            error: FormatVersionErrorInner::InvalidFormat {
                expected: "<major>.<minor>",
            },
        })?;

    // Both components must be valid integers...
    let major: u8 = major_str.parse().map_err(|err| FormatVersionError {
        input: version.into(),
        error: FormatVersionErrorInner::InvalidInteger {
            which: "major",
            err,
        },
    })?;
    let minor: u8 = minor_str.parse().map_err(|err| FormatVersionError {
        input: version.into(),
        error: FormatVersionErrorInner::InvalidInteger {
            which: "minor",
            err,
        },
    })?;

    // ...and fall within the range of versions this reporter knows about.
    let major = match major {
        0 => FormatMajorVersion::Unstable,
        o => {
            return Err(FormatVersionError {
                input: version.into(),
                error: FormatVersionErrorInner::InvalidValue {
                    which: "major",
                    value: o,
                    range: (FormatMajorVersion::Unstable as u8)
                        ..(FormatMajorVersion::_Max as u8),
                },
            });
        }
    };
    let minor = match minor {
        1 => FormatMinorVersion::First,
        o => {
            return Err(FormatVersionError {
                input: version.into(),
                error: FormatVersionErrorInner::InvalidValue {
                    which: "minor",
                    value: o,
                    range: (FormatMinorVersion::First as u8)..(FormatMinorVersion::_Max as u8),
                },
            });
        }
    };

    Ok(build(major, minor))
}
/// Translates a single nextest event into the corresponding libtest JSON
/// line(s), buffering each suite's output until the whole suite has finished.
pub(crate) fn write_event(&mut self, event: &TestEvent<'cfg>) -> Result<(), WriteEventError> {
    // Set to the attempt count when a test was retried, so the emitted test
    // name can be disambiguated with a `#<n>` suffix below.
    let mut retries = None;
    // Write the pieces of data that are the same across all events
    let (kind, eve, test_instance) = match &event.kind {
        TestEventKind::TestStarted { test_instance, .. } => {
            (KIND_TEST, EVENT_STARTED, test_instance)
        }
        TestEventKind::TestSkipped {
            test_instance,
            reason: MismatchReason::Ignored,
        } => {
            // Note: unfortunately, libtest does not expose the message text in `#[ignore = "<message>"]`
            // so we can't replicate the behavior of libtest exactly by emitting
            // that message as additional metadata
            (KIND_TEST, EVENT_STARTED, test_instance)
        }
        TestEventKind::TestFinished {
            test_instance,
            run_statuses,
            ..
        } => {
            if run_statuses.len() > 1 {
                retries = Some(run_statuses.len());
            }
            (
                KIND_TEST,
                // Only the final attempt's result determines ok/failed.
                match run_statuses.last_status().result {
                    ExecutionResult::Pass | ExecutionResult::Leak => EVENT_OK,
                    ExecutionResult::Fail { .. }
                    | ExecutionResult::ExecFail
                    | ExecutionResult::Timeout => EVENT_FAILED,
                },
                test_instance,
            )
        }
        TestEventKind::RunFinished { .. } => {
            // The run is over: flush every suite that has not yet been
            // finalized (e.g. because the run was cancelled mid-suite).
            for test_suite in std::mem::take(&mut self.test_suites).into_values() {
                self.finalize(test_suite)?;
            }
            return Ok(());
        }
        // All other event kinds have no libtest equivalent.
        _ => return Ok(()),
    };
    let suite_info = test_instance.suite_info;
    let crate_name = suite_info.package.name();
    let binary_name = &suite_info.binary_name;
    // Emit the suite start if this is the first test of the suite
    let test_suite = match self.test_suites.entry(suite_info.binary_id.as_str()) {
        std::collections::btree_map::Entry::Vacant(e) => {
            // Partition the suite's test cases into (to run, ignored,
            // filtered out) counts; `running` later drives suite finalization.
            let (running, ignored, filtered) =
                suite_info.status.test_cases().fold((0, 0, 0), |acc, tc| {
                    if tc.1.ignored {
                        (acc.0, acc.1 + 1, acc.2)
                    } else if tc.1.filter_match.is_match() {
                        (acc.0 + 1, acc.1, acc.2)
                    } else {
                        (acc.0, acc.1, acc.2 + 1)
                    }
                });
            let mut out = bytes::BytesMut::with_capacity(1024);
            // libtest's `test_count` includes ignored tests, since it also
            // emits `started` events for them.
            write!(
                &mut out,
                r#"{{"type":"{KIND_SUITE}","event":"{EVENT_STARTED}","test_count":{}"#,
                running + ignored,
            )
            .map_err(fmt_err)?;
            if self.emit_nextest_obj {
                write!(
                    &mut out,
                    r#","nextest":{{"crate":"{crate_name}","test_binary":"{binary_name}","kind":"{}"}}"#,
                    suite_info.kind,
                )
                .map_err(fmt_err)?;
            }
            out.extend_from_slice(b"}\n");
            e.insert(LibtestSuite {
                running,
                failed: 0,
                succeeded: 0,
                ignored,
                filtered,
                meta: test_instance.suite_info,
                total: std::time::Duration::new(0, 0),
                ignore_block: None,
                output_block: out,
            })
        }
        std::collections::btree_map::Entry::Occupied(e) => e.into_mut(),
    };
    let out = &mut test_suite.output_block;
    // After all the tests have been started or ignored, put the block of
    // tests that were ignored just as libtest does
    if matches!(event.kind, TestEventKind::TestFinished { .. }) {
        if let Some(ib) = test_suite.ignore_block.take() {
            out.extend_from_slice(&ib);
        }
    }
    // This is one place where we deviate from the behavior of libtest, by
    // always prefixing the test name with both the crate and the binary name,
    // as this information is quite important to distinguish tests from each
    // other when testing inside a large workspace with hundreds or thousands
    // of tests
    //
    // Additionally, a `#<n>` is used as a suffix if the test was retried,
    // as libtest does not support that functionality
    write!(
        out,
        r#"{{"type":"{kind}","event":"{eve}","name":"{}::{}${}"#,
        suite_info.package.name(),
        suite_info.binary_name,
        test_instance.name,
    )
    .map_err(fmt_err)?;
    if let Some(retry_count) = retries {
        write!(out, "#{retry_count}\"").map_err(fmt_err)?;
    } else {
        // Close the `name` string started above.
        out.extend_from_slice(b"\"");
    }
    match &event.kind {
        TestEventKind::TestFinished { run_statuses, .. } => {
            let last_status = run_statuses.last_status();
            test_suite.total += last_status.time_taken;
            test_suite.running -= 1;
            // libtest actually requires an additional `--report-time` flag to be
            // passed for the exec_time information to be written. This doesn't
            // really make sense when outputting structured output so we emit it
            // unconditionally
            write!(
                out,
                r#","exec_time":{}"#,
                last_status.time_taken.as_secs_f64()
            )
            .map_err(fmt_err)?;
            match last_status.result {
                ExecutionResult::Fail { .. } | ExecutionResult::ExecFail => {
                    test_suite.failed += 1;
                    // Write the output from the test into the `stdout` (even
                    // though it could contain stderr output as well).
                    write!(out, r#","stdout":""#).map_err(fmt_err)?;
                    strip_human_output_from_failed_test(
                        &last_status.output,
                        out,
                        test_instance.name,
                    )?;
                    out.extend_from_slice(b"\"");
                }
                ExecutionResult::Timeout => {
                    test_suite.failed += 1;
                    out.extend_from_slice(br#","reason":"time limit exceeded""#);
                }
                _ => {
                    test_suite.succeeded += 1;
                }
            }
        }
        TestEventKind::TestSkipped { .. } => {
            test_suite.running -= 1;
            if test_suite.ignore_block.is_none() {
                test_suite.ignore_block = Some(bytes::BytesMut::with_capacity(1024));
            }
            // Buffer the `ignored` event; it is appended after all `started`
            // events once the first TestFinished arrives, mimicking libtest.
            let ib = test_suite
                .ignore_block
                .get_or_insert_with(|| bytes::BytesMut::with_capacity(1024));
            writeln!(
                ib,
                r#"{{"type":"{kind}","event":"{EVENT_IGNORED}","name":"{}::{}${}"}}"#,
                suite_info.package.name(),
                suite_info.binary_name,
                test_instance.name,
            )
            .map_err(fmt_err)?;
        }
        _ => {}
    };
    out.extend_from_slice(b"}\n");
    // If this is the last test of the suite, emit the test suite summary
    // before emitting the entire block
    if test_suite.running > 0 {
        return Ok(());
    }
    if let Some(test_suite) = self.test_suites.remove(suite_info.binary_id.as_str()) {
        self.finalize(test_suite)?;
    }
    Ok(())
}
/// Writes the suite summary event into the suite's buffer and then flushes
/// the entire accumulated block to stdout as a single write.
fn finalize(&self, mut test_suite: LibtestSuite) -> Result<(), WriteEventError> {
    // The suite is "ok" only if no test in it failed.
    let event = if test_suite.failed > 0 {
        EVENT_FAILED
    } else {
        EVENT_OK
    };
    let out = &mut test_suite.output_block;
    let suite_info = test_suite.meta;
    // It's possible that a test failure etc has cancelled the run, in which
    // case we might still have tests that are "running", even ones that are
    // actually skipped, so we just add those to the filtered list
    if test_suite.running > 0 {
        test_suite.filtered += test_suite.running;
    }
    // `measured` is hardcoded to 0: it only applies to benchmarks.
    write!(
        out,
        r#"{{"type":"{KIND_SUITE}","event":"{event}","passed":{},"failed":{},"ignored":{},"measured":0,"filtered_out":{},"exec_time":{}"#,
        test_suite.succeeded,
        test_suite.failed,
        test_suite.ignored,
        test_suite.filtered,
        test_suite.total.as_secs_f64(),
    )
    .map_err(fmt_err)?;
    if self.emit_nextest_obj {
        let crate_name = suite_info.package.name();
        let binary_name = &suite_info.binary_name;
        write!(
            out,
            r#","nextest":{{"crate":"{crate_name}","test_binary":"{binary_name}","kind":"{}"}}"#,
            suite_info.kind,
        )
        .map_err(fmt_err)?;
    }
    out.extend_from_slice(b"}\n");
    {
        // Lock stdout so the whole suite block is written without interleaving
        // from other output, and flush explicitly so consumers see it promptly.
        use std::io::Write as _;
        let mut stdout = std::io::stdout().lock();
        stdout.write_all(out).map_err(WriteEventError::Io)?;
        stdout.flush().map_err(WriteEventError::Io)?;
    }
    Ok(())
}
}
/// Unfortunately, to replicate the libtest json output, we need to do our own
/// filtering of the output to strip out the data emitted by libtest in the
/// human format.
///
/// Note that this writes into the middle of a JSON string literal in `out`,
/// so every newline must be emitted as the two-character escape `\n`, never
/// as a raw newline byte.
///
/// This function relies on the fact that nextest runs every individual test in
/// isolation.
fn strip_human_output_from_failed_test(
    output: &ChildExecutionOutput,
    out: &mut bytes::BytesMut,
    test_name: &str,
) -> Result<(), WriteEventError> {
    match output {
        ChildExecutionOutput::Output {
            result: _,
            output,
            errors,
        } => {
            match output {
                ChildOutput::Combined { output } => {
                    strip_human_stdout_or_combined(output, out, test_name)?;
                }
                ChildOutput::Split(split) => {
                    // This is not a case that we hit because we always set CaptureStrategy to Combined. But
                    // handle it in a reasonable fashion. (We do have a unit test for this case, so gate the
                    // assertion with cfg(not(test)).)
                    #[cfg(not(test))]
                    {
                        debug_assert!(false, "libtest output requires CaptureStrategy::Combined");
                    }
                    if let Some(stdout) = &split.stdout {
                        if !stdout.is_empty() {
                            write!(out, "--- STDOUT ---\\n").map_err(fmt_err)?;
                            strip_human_stdout_or_combined(stdout, out, test_name)?;
                        }
                    } else {
                        write!(out, "(stdout not captured)").map_err(fmt_err)?;
                    }
                    // If stderr is not empty, just write all of it in.
                    if let Some(stderr) = &split.stderr {
                        if !stderr.is_empty() {
                            write!(out, "\\n--- STDERR ---\\n").map_err(fmt_err)?;
                            write!(out, "{}", EscapedString(stderr.as_str_lossy()))
                                .map_err(fmt_err)?;
                        }
                    } else {
                        // Use `write!` rather than `writeln!` here: a trailing raw
                        // newline byte would land unescaped inside the JSON string
                        // being built and corrupt the line-oriented output. Every
                        // sibling branch likewise only emits the literal `\n` escape.
                        write!(out, "\\n(stderr not captured)").map_err(fmt_err)?;
                    }
                }
            }
            if let Some(errors) = errors {
                write!(out, "\\n--- EXECUTION ERRORS ---\\n").map_err(fmt_err)?;
                write!(
                    out,
                    "{}",
                    EscapedString(&DisplayErrorChain::new(errors).to_string())
                )
                .map_err(fmt_err)?;
            }
        }
        ChildExecutionOutput::StartError(error) => {
            write!(out, "--- EXECUTION ERROR ---\\n").map_err(fmt_err)?;
            write!(
                out,
                "{}",
                EscapedString(&DisplayErrorChain::new(error).to_string())
            )
            .map_err(fmt_err)?;
        }
    }
    Ok(())
}
/// Strips the default-harness chatter from a failed test's combined (or
/// stdout-only) output, writing just the test's own output into `out` as
/// JSON-escaped text (newlines emitted as the `\n` escape, not raw bytes).
fn strip_human_stdout_or_combined(
    output: &ChildSingleOutput,
    out: &mut bytes::BytesMut,
    test_name: &str,
) -> Result<(), WriteEventError> {
    if output.buf.contains_str("running 1 test\n") {
        // This is most likely the default test harness.
        let lines = output
            .lines()
            // Drop the harness preamble up to and including "running 1 test".
            .skip_while(|line| line != b"running 1 test")
            .skip(1)
            // Keep lines until the harness prints this test's own
            // "test <name> ... FAILED" marker (exclusive).
            .take_while(|line| {
                if let Some(name) = line
                    .strip_prefix(b"test ")
                    .and_then(|np| np.strip_suffix(b" ... FAILED"))
                {
                    if test_name.as_bytes() == name {
                        return false;
                    }
                }
                true
            })
            .map(|line| line.to_str_lossy());
        for line in lines {
            // This will never fail unless we are OOM
            write!(out, "{}\\n", EscapedString(&line)).map_err(fmt_err)?;
        }
    } else {
        // This is most likely a custom test harness. Just write out the entire
        // output.
        write!(out, "{}", EscapedString(output.as_str_lossy())).map_err(fmt_err)?;
    }
    Ok(())
}
/// Copy of the same string escaper used in libtest
///
/// Escapes a string for embedding inside a JSON string literal when written
/// via `Display`, leaving runs of ordinary bytes untouched and replacing
/// quotes, backslashes, and control characters with their JSON escapes.
///
/// <https://github.com/rust-lang/rust/blob/f440b5f0ea042cb2087a36631b20878f9847ee28/library/test/src/formatters/json.rs#L222-L285>
struct EscapedString<'s>(&'s str);

impl std::fmt::Display for EscapedString<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        let s = self.0;
        // Index just past the last byte already written out; everything in
        // `s[flushed..idx]` is a pending run that needs no escaping.
        let mut flushed = 0;
        for (idx, b) in s.bytes().enumerate() {
            // Bytes with no entry here pass through verbatim as part of the
            // pending run.
            let replacement = match b {
                b'"' => "\\\"",
                b'\\' => "\\\\",
                b'\x00' => "\\u0000",
                b'\x01' => "\\u0001",
                b'\x02' => "\\u0002",
                b'\x03' => "\\u0003",
                b'\x04' => "\\u0004",
                b'\x05' => "\\u0005",
                b'\x06' => "\\u0006",
                b'\x07' => "\\u0007",
                b'\x08' => "\\b",
                b'\t' => "\\t",
                b'\n' => "\\n",
                b'\x0b' => "\\u000b",
                b'\x0c' => "\\f",
                b'\r' => "\\r",
                b'\x0e' => "\\u000e",
                b'\x0f' => "\\u000f",
                b'\x10' => "\\u0010",
                b'\x11' => "\\u0011",
                b'\x12' => "\\u0012",
                b'\x13' => "\\u0013",
                b'\x14' => "\\u0014",
                b'\x15' => "\\u0015",
                b'\x16' => "\\u0016",
                b'\x17' => "\\u0017",
                b'\x18' => "\\u0018",
                b'\x19' => "\\u0019",
                b'\x1a' => "\\u001a",
                b'\x1b' => "\\u001b",
                b'\x1c' => "\\u001c",
                b'\x1d' => "\\u001d",
                b'\x1e' => "\\u001e",
                b'\x1f' => "\\u001f",
                b'\x7f' => "\\u007f",
                _ => continue,
            };
            // Flush the pending unescaped run, then the escape sequence.
            if flushed < idx {
                f.write_str(&s[flushed..idx])?;
            }
            f.write_str(replacement)?;
            flushed = idx + 1;
        }
        // Emit whatever trails the final escaped byte.
        if flushed != s.len() {
            f.write_str(&s[flushed..])?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use crate::{
        errors::ChildStartError,
        reporter::structured::libtest::strip_human_output_from_failed_test,
        test_output::{ChildExecutionOutput, ChildOutput, ChildSplitOutput},
    };
    use bytes::BytesMut;
    use color_eyre::eyre::eyre;
    use std::{io, sync::Arc};

    /// Validates that the human output portion from a failed test is stripped
    /// out when writing a JSON string, as it is not part of the output when
    /// libtest itself outputs the JSON, so we have 100% identical output to libtest
    #[test]
    fn strips_human_output() {
        // Simulated combined stdout+stderr from a default-harness run,
        // including the preamble/summary chatter that should be stripped.
        const TEST_OUTPUT: &[&str] = &[
            "\n",
            "running 1 test\n",
            "[src/index.rs:185] \"boop\" = \"boop\"\n",
            "this is stdout\n",
            "this i stderr\nok?\n",
            "thread 'index::test::download_url_crates_io'",
            r#" panicked at src/index.rs:206:9:
oh no
stack backtrace:
0: rust_begin_unwind
at /rustc/a28077b28a02b92985b3a3faecf92813155f1ea1/library/std/src/panicking.rs:597:5
1: core::panicking::panic_fmt
at /rustc/a28077b28a02b92985b3a3faecf92813155f1ea1/library/core/src/panicking.rs:72:14
2: tame_index::index::test::download_url_crates_io
at ./src/index.rs:206:9
3: tame_index::index::test::download_url_crates_io::{{closure}}
at ./src/index.rs:179:33
4: core::ops::function::FnOnce::call_once
at /rustc/a28077b28a02b92985b3a3faecf92813155f1ea1/library/core/src/ops/function.rs:250:5
5: core::ops::function::FnOnce::call_once
at /rustc/a28077b28a02b92985b3a3faecf92813155f1ea1/library/core/src/ops/function.rs:250:5
note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose backtrace.
"#,
            "test index::test::download_url_crates_io ... FAILED\n",
            "\n\nfailures:\n\nfailures:\n index::test::download_url_crates_io\n\ntest result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 13 filtered out; finished in 0.01s\n",
        ];
        let output = {
            let mut acc = BytesMut::new();
            for line in TEST_OUTPUT {
                acc.extend_from_slice(line.as_bytes());
            }
            ChildOutput::Combined {
                output: acc.freeze().into(),
            }
        };
        let mut actual = bytes::BytesMut::new();
        strip_human_output_from_failed_test(
            &ChildExecutionOutput::Output {
                result: None,
                output,
                errors: None,
            },
            &mut actual,
            "index::test::download_url_crates_io",
        )
        .unwrap();
        insta::assert_snapshot!(std::str::from_utf8(&actual).unwrap());
    }

    #[test]
    fn strips_human_output_custom_test_harness() {
        // For a custom test harness, we don't strip the human output at all.
        const TEST_OUTPUT: &[&str] = &["\n", "this is a custom test harness!!!\n", "1 test passed"];
        let output = {
            let mut acc = BytesMut::new();
            for line in TEST_OUTPUT {
                acc.extend_from_slice(line.as_bytes());
            }
            ChildOutput::Combined {
                output: acc.freeze().into(),
            }
        };
        let mut actual = bytes::BytesMut::new();
        strip_human_output_from_failed_test(
            &ChildExecutionOutput::Output {
                result: None,
                output,
                errors: None,
            },
            &mut actual,
            "non-existent",
        )
        .unwrap();
        insta::assert_snapshot!(std::str::from_utf8(&actual).unwrap());
    }

    /// Exercises the `StartError` branch: a failure to spawn the test binary
    /// is rendered as an execution-error chain.
    #[test]
    fn strips_human_output_start_error() {
        let inner_error = eyre!("inner error");
        let error = io::Error::new(io::ErrorKind::Other, inner_error);
        let output = ChildExecutionOutput::StartError(ChildStartError::Spawn(Arc::new(error)));
        let mut actual = bytes::BytesMut::new();
        strip_human_output_from_failed_test(&output, &mut actual, "non-existent").unwrap();
        insta::assert_snapshot!(std::str::from_utf8(&actual).unwrap());
    }

    /// Exercises the split branch with neither stream captured.
    #[test]
    fn strips_human_output_none() {
        let mut actual = bytes::BytesMut::new();
        strip_human_output_from_failed_test(
            &ChildExecutionOutput::Output {
                result: None,
                output: ChildOutput::Split(ChildSplitOutput {
                    stdout: None,
                    stderr: None,
                }),
                errors: None,
            },
            &mut actual,
            "non-existent",
        )
        .unwrap();
        insta::assert_snapshot!(std::str::from_utf8(&actual).unwrap());
    }
}