1use super::{
12 dicts,
13 format::{
14 CARGO_METADATA_JSON_PATH, OutputDict, RECORD_OPTS_JSON_PATH, RUN_LOG_FILE_NAME,
15 STDERR_DICT_PATH, STDOUT_DICT_PATH, STORE_ZIP_FILE_NAME, TEST_LIST_JSON_PATH,
16 stored_file_options, zstd_file_options,
17 },
18 summary::{
19 OutputEventKind, OutputFileName, OutputKind, RecordOpts, TestEventKindSummary,
20 TestEventSummary, ZipStoreOutput, ZipStoreOutputDescription,
21 },
22};
23use crate::{
24 errors::{RunStoreError, StoreWriterError},
25 output_spec::{LiveSpec, RecordingSpec},
26 record::format::{RERUN_INFO_JSON_PATH, RerunInfo},
27 reporter::events::{
28 ChildExecutionOutputDescription, ChildOutputDescription, ExecuteStatus, ExecutionStatuses,
29 SetupScriptExecuteStatus,
30 },
31 test_output::ChildSingleOutput,
32};
33use camino::{Utf8Path, Utf8PathBuf};
34use countio::Counter;
35use debug_ignore::DebugIgnore;
36use eazip::ArchiveWriter;
37use nextest_metadata::TestListSummary;
38use std::{
39 borrow::Cow,
40 collections::HashSet,
41 fs::File,
42 io::{self, Write},
43};
44
/// Writer for the run log: a zstd encoder wrapped in byte counters.
///
/// The outer `Counter` tracks uncompressed bytes written into the encoder;
/// the inner `Counter<File>` tracks compressed bytes that reach the file.
/// `inner` is an `Option` so that `finish` can take ownership of the encoder
/// while `Drop` retains a best-effort close path for the abandoned case.
struct LogEncoder {
    inner: Option<Counter<zstd::stream::Encoder<'static, Counter<File>>>>,
}
60
61impl std::fmt::Debug for LogEncoder {
62 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
63 f.debug_struct("LogEncoder").finish_non_exhaustive()
64 }
65}
66
67impl LogEncoder {
68 fn new(encoder: zstd::stream::Encoder<'static, Counter<File>>) -> Self {
69 Self {
70 inner: Some(Counter::new(encoder)),
71 }
72 }
73
74 fn finish(mut self, entries: u64) -> io::Result<ComponentSizes> {
78 let counter = self.inner.take().expect("encoder already finished");
79 let uncompressed = counter.writer_bytes() as u64;
80 let file_counter = counter.into_inner().finish()?;
81 let compressed = file_counter.writer_bytes() as u64;
82 Ok(ComponentSizes {
83 compressed,
84 uncompressed,
85 entries,
86 })
87 }
88}
89
90impl Write for LogEncoder {
91 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
92 self.inner
93 .as_mut()
94 .expect("encoder already finished")
95 .write(buf)
96 }
97
98 fn flush(&mut self) -> io::Result<()> {
99 self.inner
100 .as_mut()
101 .expect("encoder already finished")
102 .flush()
103 }
104}
105
106impl Drop for LogEncoder {
107 fn drop(&mut self) {
108 if let Some(counter) = self.inner.take() {
109 let _ = counter.into_inner().finish();
114 }
115 }
116}
117
/// Records a test run to disk: events are appended to a zstd-compressed
/// JSON-lines log, while metadata and captured output go into a zip store.
#[derive(Debug)]
pub struct RunRecorder {
    /// Path to the zip store file inside the run directory.
    store_path: Utf8PathBuf,
    /// Writer for the zip store (deduplicates files by path).
    store_writer: StoreWriter,
    /// Path to the run log file inside the run directory.
    log_path: Utf8PathBuf,
    /// zstd-compressed writer for the run log.
    log: DebugIgnore<LogEncoder>,
    /// Number of events written to the log so far.
    log_entries: u64,
    /// Per-output size cap in bytes; larger captured outputs are truncated.
    max_output_size: usize,
}
132
impl RunRecorder {
    /// Creates a recorder rooted at `run_dir`, creating the directory if
    /// necessary.
    ///
    /// Opens (and truncates) two sinks inside the run directory:
    /// - the zip store, which receives metadata and captured test output;
    /// - the run log, a zstd-compressed stream of JSON event lines.
    ///
    /// `max_output_size` caps how much of each captured output is stored.
    pub(super) fn new(
        run_dir: Utf8PathBuf,
        max_output_size: bytesize::ByteSize,
    ) -> Result<Self, RunStoreError> {
        std::fs::create_dir_all(&run_dir).map_err(|error| RunStoreError::RunDirCreate {
            run_dir: run_dir.clone(),
            error,
        })?;

        let store_path = run_dir.join(STORE_ZIP_FILE_NAME);
        let store_writer =
            StoreWriter::new(&store_path).map_err(|error| RunStoreError::StoreWrite {
                store_path: store_path.clone(),
                error,
            })?;

        let log_path = run_dir.join(RUN_LOG_FILE_NAME);
        let file = std::fs::OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(&log_path)
            .map_err(|error| RunStoreError::RunLogCreate {
                path: log_path.clone(),
                error,
            })?;

        // zstd level 3 (zstd's default compression level).
        let encoder = zstd::stream::Encoder::new(Counter::new(file), 3).map_err(|error| {
            RunStoreError::RunLogCreate {
                path: log_path.clone(),
                error,
            }
        })?;
        let log = LogEncoder::new(encoder);

        Ok(Self {
            store_path,
            store_writer,
            log_path,
            log: DebugIgnore(log),
            log_entries: 0,
            // On 32-bit targets a limit above usize::MAX saturates rather
            // than failing.
            max_output_size: usize::try_from(max_output_size.as_u64()).unwrap_or(usize::MAX),
        })
    }

    /// Writes run-level metadata into the zip store: the test list, cargo
    /// metadata, record options, and the zstd dictionaries used for
    /// stdout/stderr compression.
    pub(crate) fn write_meta(
        &mut self,
        cargo_metadata_json: &str,
        test_list: &TestListSummary,
        opts: &RecordOpts,
    ) -> Result<(), RunStoreError> {
        let test_list_json = serde_json::to_string(test_list)
            .map_err(|error| RunStoreError::TestListSerialize { error })?;

        let opts_json = serde_json::to_string(opts)
            .map_err(|error| RunStoreError::RecordOptionsSerialize { error })?;

        self.write_archive_file(TEST_LIST_JSON_PATH, test_list_json.as_bytes())?;
        self.write_archive_file(CARGO_METADATA_JSON_PATH, cargo_metadata_json.as_bytes())?;
        self.write_archive_file(RECORD_OPTS_JSON_PATH, opts_json.as_bytes())?;

        // Store the dictionaries alongside the data so the archive can be
        // decompressed without access to this binary's embedded copies.
        self.write_archive_file(STDOUT_DICT_PATH, dicts::STDOUT)?;
        self.write_archive_file(STDERR_DICT_PATH, dicts::STDERR)?;

        Ok(())
    }

    /// Serializes `rerun_info` to JSON and writes it into the zip store.
    pub(crate) fn write_rerun_info(&mut self, rerun_info: &RerunInfo) -> Result<(), RunStoreError> {
        let rerun_info_json = serde_json::to_string(rerun_info)
            .map_err(|error| RunStoreError::RerunInfoSerialize { error })?;

        self.write_archive_file(RERUN_INFO_JSON_PATH, rerun_info_json.as_bytes())?;

        Ok(())
    }

    /// Adds a single file to the zip store, mapping errors to `StoreWrite`
    /// with this recorder's store path attached.
    fn write_archive_file(&mut self, path: &str, bytes: &[u8]) -> Result<(), RunStoreError> {
        self.store_writer
            .add_file(Utf8PathBuf::from(path), bytes)
            .map_err(|error| RunStoreError::StoreWrite {
                store_path: self.store_path.clone(),
                error,
            })
    }

    /// Records one test event: captured output is written into the zip store
    /// (possibly truncated), and the converted event is appended to the run
    /// log as a single JSON line.
    pub(crate) fn write_event(
        &mut self,
        event: TestEventSummary<LiveSpec>,
    ) -> Result<(), RunStoreError> {
        let mut cx = SerializeTestEventContext {
            store_writer: &mut self.store_writer,
            max_output_size: self.max_output_size,
        };

        let event = cx
            .convert_event(event)
            .map_err(|error| RunStoreError::StoreWrite {
                store_path: self.store_path.clone(),
                error,
            })?;

        let json = serde_json::to_string(&event)
            .map_err(|error| RunStoreError::TestEventSerialize { error })?;
        self.write_log_impl(json.as_bytes())?;
        self.write_log_impl(b"\n")?;

        self.log_entries += 1;

        Ok(())
    }

    /// Writes raw bytes to the run log, mapping errors to `RunLogWrite`.
    fn write_log_impl(&mut self, bytes: &[u8]) -> Result<(), RunStoreError> {
        self.log
            .write_all(bytes)
            .map_err(|error| RunStoreError::RunLogWrite {
                path: self.log_path.clone(),
                error,
            })
    }

    /// Finalizes both sinks (log first, then store) and returns their size
    /// accounting.
    pub(crate) fn finish(self) -> Result<StoreSizes, RunStoreError> {
        let log_sizes =
            self.log
                .0
                .finish(self.log_entries)
                .map_err(|error| RunStoreError::RunLogFlush {
                    path: self.log_path.clone(),
                    error,
                })?;

        let store_sizes =
            self.store_writer
                .finish()
                .map_err(|error| RunStoreError::StoreWrite {
                    store_path: self.store_path.clone(),
                    error,
                })?;

        Ok(StoreSizes {
            log: log_sizes,
            store: store_sizes,
        })
    }
}
305
/// Writes files into the run's zip store, deduplicating by path.
#[derive(Debug)]
pub(crate) struct StoreWriter {
    /// Zip archive writer over a byte-counting file handle; the counter
    /// tracks compressed (on-disk) bytes.
    writer: DebugIgnore<ArchiveWriter<Counter<File>>>,
    /// Paths already added; repeated adds for the same path are no-ops.
    added_files: HashSet<Utf8PathBuf>,
    /// Running total of input bytes handed to `add_file`, before compression.
    uncompressed_size: u64,
}
314
315impl StoreWriter {
316 fn new(store_path: &Utf8Path) -> Result<Self, StoreWriterError> {
318 let zip_file = std::fs::OpenOptions::new()
319 .create(true)
320 .truncate(true)
321 .write(true)
322 .open(store_path)
323 .map_err(|error| StoreWriterError::Create { error })?;
324 let writer = ArchiveWriter::new(Counter::new(zip_file));
325
326 Ok(Self {
327 writer: DebugIgnore(writer),
328 added_files: HashSet::new(),
329 uncompressed_size: 0,
330 })
331 }
332
333 fn add_file(&mut self, path: Utf8PathBuf, contents: &[u8]) -> Result<(), StoreWriterError> {
340 if self.added_files.contains(&path) {
341 return Ok(());
342 }
343
344 self.uncompressed_size += contents.len() as u64;
346
347 let dict = OutputDict::for_path(&path);
348 match dict.dict_bytes() {
349 Some(dict_bytes) => {
350 let compressed = compress_with_dict(contents, dict_bytes)
353 .map_err(|error| StoreWriterError::Compress { error })?;
354
355 let options = stored_file_options();
356 self.writer
357 .add_file(path.as_str(), &compressed[..], &options)
358 .map_err(|error| StoreWriterError::Write {
359 path: path.clone(),
360 error,
361 })?;
362 }
363 None => {
364 let options = zstd_file_options();
366 self.writer
367 .add_file(path.as_str(), contents, &options)
368 .map_err(|error| StoreWriterError::Write {
369 path: path.clone(),
370 error,
371 })?;
372 }
373 }
374
375 self.added_files.insert(path);
376
377 Ok(())
378 }
379
380 fn finish(self) -> Result<ComponentSizes, StoreWriterError> {
384 let entries = self.added_files.len() as u64;
385 let mut counter = self
386 .writer
387 .0
388 .finish()
389 .map_err(|error| StoreWriterError::Finish { error })?;
390
391 counter
392 .flush()
393 .map_err(|error| StoreWriterError::Flush { error })?;
394
395 Ok(ComponentSizes {
396 compressed: counter.writer_bytes() as u64,
397 uncompressed: self.uncompressed_size,
398 entries,
399 })
400 }
401}
402
/// Size accounting for one component (run log or zip store) of a recorded
/// run.
#[derive(Clone, Copy, Debug, Default)]
pub struct ComponentSizes {
    /// Bytes on disk after compression.
    pub compressed: u64,
    /// Bytes before compression.
    pub uncompressed: u64,
    /// Number of entries recorded (log lines or archive files).
    pub entries: u64,
}

/// Combined size accounting for a recorded run's log and store.
#[derive(Clone, Copy, Debug, Default)]
pub struct StoreSizes {
    /// Sizes for the run log.
    pub log: ComponentSizes,
    /// Sizes for the zip store.
    pub store: ComponentSizes,
}

impl StoreSizes {
    /// Total on-disk (compressed) bytes across both components.
    pub fn total_compressed(&self) -> u64 {
        self.store.compressed + self.log.compressed
    }

    /// Total uncompressed bytes across both components.
    pub fn total_uncompressed(&self) -> u64 {
        self.store.uncompressed + self.log.uncompressed
    }
}
434
435fn compress_with_dict(data: &[u8], dict_bytes: &[u8]) -> io::Result<Vec<u8>> {
437 let dict = zstd::dict::EncoderDictionary::copy(dict_bytes, 3);
440 let mut encoder = zstd::stream::Encoder::with_prepared_dictionary(Vec::new(), &dict)?;
441 encoder.write_all(data)?;
442 encoder.finish()
443}
444
/// Context for converting live test events into their recorded form, writing
/// captured output into the zip store as a side effect.
struct SerializeTestEventContext<'a> {
    /// Store that receives captured stdout/stderr/combined output files.
    store_writer: &'a mut StoreWriter,
    /// Maximum bytes of a single captured output to store; larger outputs
    /// are truncated (head + marker + tail; see `truncate_output`).
    max_output_size: usize,
}
453
impl SerializeTestEventContext<'_> {
    /// Converts a live event into its recording form, preserving the
    /// timestamp and elapsed time and converting the kind.
    fn convert_event(
        &mut self,
        event: TestEventSummary<LiveSpec>,
    ) -> Result<TestEventSummary<RecordingSpec>, StoreWriterError> {
        Ok(TestEventSummary {
            timestamp: event.timestamp,
            elapsed: event.elapsed,
            kind: self.convert_event_kind(event.kind)?,
        })
    }

    /// Core events carry no captured output and pass through unchanged;
    /// output-bearing events are converted variant by variant.
    fn convert_event_kind(
        &mut self,
        kind: TestEventKindSummary<LiveSpec>,
    ) -> Result<TestEventKindSummary<RecordingSpec>, StoreWriterError> {
        match kind {
            TestEventKindSummary::Core(core) => Ok(TestEventKindSummary::Core(core)),
            TestEventKindSummary::Output(output) => Ok(TestEventKindSummary::Output(
                self.convert_output_event(output)?,
            )),
        }
    }

    /// Rebuilds each output-carrying event variant, converting the embedded
    /// execute status(es) so that their captured output lives in the store.
    /// All other fields are carried over unchanged.
    fn convert_output_event(
        &mut self,
        event: OutputEventKind<LiveSpec>,
    ) -> Result<OutputEventKind<RecordingSpec>, StoreWriterError> {
        match event {
            OutputEventKind::SetupScriptFinished {
                stress_index,
                index,
                total,
                script_id,
                program,
                args,
                no_capture,
                run_status,
            } => {
                let run_status = self.convert_setup_script_status(&run_status)?;
                Ok(OutputEventKind::SetupScriptFinished {
                    stress_index,
                    index,
                    total,
                    script_id,
                    program,
                    args,
                    no_capture,
                    run_status,
                })
            }
            OutputEventKind::TestAttemptFailedWillRetry {
                stress_index,
                test_instance,
                run_status,
                delay_before_next_attempt,
                failure_output,
                running,
            } => {
                let run_status = self.convert_execute_status(run_status)?;
                Ok(OutputEventKind::TestAttemptFailedWillRetry {
                    stress_index,
                    test_instance,
                    run_status,
                    delay_before_next_attempt,
                    failure_output,
                    running,
                })
            }
            OutputEventKind::TestFinished {
                stress_index,
                test_instance,
                success_output,
                failure_output,
                junit_store_success_output,
                junit_store_failure_output,
                junit_flaky_fail_status,
                run_statuses,
                current_stats,
                running,
            } => {
                let run_statuses = self.convert_execution_statuses(run_statuses)?;
                Ok(OutputEventKind::TestFinished {
                    stress_index,
                    test_instance,
                    success_output,
                    failure_output,
                    junit_store_success_output,
                    junit_store_failure_output,
                    junit_flaky_fail_status,
                    run_statuses,
                    current_stats,
                    running,
                })
            }
        }
    }

    /// Converts a setup script's execute status, writing its captured output
    /// to the store and cloning the remaining metadata.
    fn convert_setup_script_status(
        &mut self,
        status: &SetupScriptExecuteStatus<LiveSpec>,
    ) -> Result<SetupScriptExecuteStatus<RecordingSpec>, StoreWriterError> {
        Ok(SetupScriptExecuteStatus {
            output: self.convert_child_execution_output(&status.output)?,
            result: status.result.clone(),
            start_time: status.start_time,
            time_taken: status.time_taken,
            is_slow: status.is_slow,
            env_map: status.env_map.clone(),
            error_summary: status.error_summary.clone(),
        })
    }

    /// Converts every attempt's status in order, preserving the flaky result
    /// computed from the live statuses.
    fn convert_execution_statuses(
        &mut self,
        statuses: ExecutionStatuses<LiveSpec>,
    ) -> Result<ExecutionStatuses<RecordingSpec>, StoreWriterError> {
        let flaky_result = statuses.flaky_result();
        let statuses = statuses
            .into_iter()
            .map(|status| self.convert_execute_status(status))
            .collect::<Result<Vec<_>, _>>()?;
        Ok(ExecutionStatuses::new(statuses, flaky_result))
    }

    /// Converts one execute status, writing its captured output to the store
    /// and moving the remaining fields over unchanged.
    fn convert_execute_status(
        &mut self,
        status: ExecuteStatus<LiveSpec>,
    ) -> Result<ExecuteStatus<RecordingSpec>, StoreWriterError> {
        let output = self.convert_child_execution_output(&status.output)?;

        Ok(ExecuteStatus {
            retry_data: status.retry_data,
            output,
            result: status.result,
            start_time: status.start_time,
            time_taken: status.time_taken,
            is_slow: status.is_slow,
            delay_before_start: status.delay_before_start,
            error_summary: status.error_summary,
            output_error_slice: status.output_error_slice,
        })
    }

    /// Converts a child process's execution output. Start errors carry no
    /// captured output and are cloned as-is.
    fn convert_child_execution_output(
        &mut self,
        output: &ChildExecutionOutputDescription<LiveSpec>,
    ) -> Result<ChildExecutionOutputDescription<RecordingSpec>, StoreWriterError> {
        match output {
            ChildExecutionOutputDescription::Output {
                result,
                output,
                errors,
            } => {
                let output = self.convert_child_output(output)?;
                Ok(ChildExecutionOutputDescription::Output {
                    result: result.clone(),
                    output,
                    errors: errors.clone(),
                })
            }
            ChildExecutionOutputDescription::StartError(err) => {
                Ok(ChildExecutionOutputDescription::StartError(err.clone()))
            }
        }
    }

    /// Writes a child's captured streams into the store, producing the
    /// store-backed description of where each stream lives.
    fn convert_child_output(
        &mut self,
        output: &ChildOutputDescription,
    ) -> Result<ZipStoreOutputDescription, StoreWriterError> {
        match output {
            ChildOutputDescription::Split { stdout, stderr } => {
                Ok(ZipStoreOutputDescription::Split {
                    stdout: stdout
                        .as_ref()
                        .map(|o| self.write_single_output(Some(o), OutputKind::Stdout))
                        .transpose()?,
                    stderr: stderr
                        .as_ref()
                        .map(|o| self.write_single_output(Some(o), OutputKind::Stderr))
                        .transpose()?,
                })
            }
            ChildOutputDescription::Combined { output } => {
                Ok(ZipStoreOutputDescription::Combined {
                    output: self.write_single_output(Some(output), OutputKind::Combined)?,
                })
            }
            ChildOutputDescription::NotLoaded => {
                unreachable!(
                    "NotLoaded output should never be present during recording \
                     (NotLoaded is only produced during replay conversion)"
                );
            }
        }
    }

    /// Writes one captured output stream into the store and returns a
    /// reference to it.
    ///
    /// - `None` or empty output is recorded as `ZipStoreOutput::Empty`
    ///   without touching the store. (Note: current call sites always pass
    ///   `Some`, so the `None` arm is defensive.)
    /// - Output larger than `max_output_size` is truncated (head + marker +
    ///   tail; see `truncate_output`) and recorded as `Truncated` with the
    ///   original size.
    ///
    /// The store path is `out/<file_name>`, where the file name is derived
    /// from the content and `kind`, so identical outputs map to the same
    /// path (which `StoreWriter` deduplicates).
    fn write_single_output(
        &mut self,
        output: Option<&ChildSingleOutput>,
        kind: OutputKind,
    ) -> Result<ZipStoreOutput, StoreWriterError> {
        let Some(output) = output else {
            return Ok(ZipStoreOutput::Empty);
        };

        if output.buf().is_empty() {
            return Ok(ZipStoreOutput::Empty);
        }

        let original_len = output.buf().len();
        let (data, truncated): (Cow<'_, [u8]>, bool) = if original_len <= self.max_output_size {
            (Cow::Borrowed(output.buf()), false)
        } else {
            (truncate_output(output.buf(), self.max_output_size), true)
        };

        let file_name = OutputFileName::from_content(&data, kind);
        let file_path = Utf8PathBuf::from(format!("out/{file_name}"));

        self.store_writer.add_file(file_path, &data)?;

        if truncated {
            Ok(ZipStoreOutput::Truncated {
                file_name,
                original_size: original_len as u64,
            })
        } else {
            Ok(ZipStoreOutput::Full { file_name })
        }
    }
}
693
/// Shortens `buf` to roughly `max_size` bytes, keeping the head and tail of
/// the data around an explanatory `... [truncated N bytes] ...` marker.
///
/// Input that already fits is returned borrowed. When `max_size` is smaller
/// than the marker itself, the result is the marker alone (and may exceed
/// `max_size`).
fn truncate_output(buf: &[u8], max_size: usize) -> Cow<'_, [u8]> {
    if buf.len() <= max_size {
        return Cow::Borrowed(buf);
    }

    let dropped = buf.len() - max_size;
    let notice = format!("\n\n... [truncated {dropped} bytes] ...\n\n");

    // Split the remaining budget between the head and the tail, giving the
    // tail the odd byte.
    let budget = max_size.saturating_sub(notice.len());
    let head_len = budget / 2;
    let tail_len = budget - head_len;

    let mut out = Vec::with_capacity(head_len + notice.len() + tail_len);
    out.extend_from_slice(&buf[..head_len]);
    out.extend_from_slice(notice.as_bytes());
    out.extend_from_slice(&buf[buf.len() - tail_len..]);
    Cow::Owned(out)
}
720
#[cfg(test)]
mod tests {
    use super::*;
    use crate::record::dicts;

    // Input that fits within the limit is returned borrowed, unchanged.
    #[test]
    fn test_truncate_output_no_truncation_needed() {
        let input = b"hello world";
        let result = truncate_output(input, 100);
        assert_eq!(&*result, input);
        assert!(matches!(result, Cow::Borrowed(_)), "should be borrowed");
    }

    // Boundary case: input exactly at the limit is also returned borrowed.
    #[test]
    fn test_truncate_output_exact_size() {
        let input = b"exactly100bytes";
        let result = truncate_output(input, input.len());
        assert_eq!(&*result, input);
        assert!(matches!(result, Cow::Borrowed(_)), "should be borrowed");
    }

    // Over-limit input is truncated to an owned buffer containing the head,
    // a marker, and the tail of the original data.
    #[test]
    fn test_truncate_output_basic() {
        let input: Vec<u8> = (0..200).collect();
        let max_size = 100;

        let result = truncate_output(&input, max_size);

        assert!(matches!(result, Cow::Owned(_)), "should be owned");

        assert!(
            result.len() <= max_size,
            "result len {} should be <= max_size {}",
            result.len(),
            max_size
        );

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should contain truncation marker: {result_str:?}"
        );
        assert!(
            result_str.contains("bytes]"),
            "should contain 'bytes]': {result_str:?}"
        );

        assert!(
            result.starts_with(&[0, 1, 2]),
            "should start with beginning of input"
        );

        assert!(
            result.ends_with(&[197, 198, 199]),
            "should end with end of input"
        );
    }

    // Recognizable head/tail content survives truncation of a large middle.
    #[test]
    fn test_truncate_output_preserves_head_and_tail() {
        let head = b"HEAD_CONTENT_";
        let middle = vec![b'x'; 1000];
        let tail = b"_TAIL_CONTENT";

        let mut input = Vec::new();
        input.extend_from_slice(head);
        input.extend_from_slice(&middle);
        input.extend_from_slice(tail);

        let max_size = 200;
        let result = truncate_output(&input, max_size);

        assert!(result.len() <= max_size);

        assert!(
            result.starts_with(b"HEAD"),
            "should preserve head: {:?}",
            String::from_utf8_lossy(&result[..20])
        );

        assert!(
            result.ends_with(b"CONTENT"),
            "should preserve tail: {:?}",
            String::from_utf8_lossy(&result[result.len() - 20..])
        );
    }

    // The marker reports exactly how many bytes were dropped.
    #[test]
    fn test_truncate_output_marker_shows_correct_count() {
        let input: Vec<u8> = vec![b'a'; 1000];
        let max_size = 100;

        let result = truncate_output(&input, max_size);
        let result_str = String::from_utf8_lossy(&result);

        assert!(
            result_str.contains("[truncated 900 bytes]"),
            "should show correct truncation count: {result_str:?}"
        );
    }

    // Sanity check on multi-megabyte input.
    #[test]
    fn test_truncate_output_large_input() {
        let input: Vec<u8> = vec![b'x'; 20 * 1024 * 1024];
        let max_size = 10 * 1024 * 1024;
        let result = truncate_output(&input, max_size);

        assert!(
            result.len() <= max_size,
            "result {} should be <= max_size {}",
            result.len(),
            max_size
        );

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should contain truncation marker"
        );
    }

    // Degenerate case: when max_size is smaller than the marker, the result
    // is the marker alone (head and tail budgets collapse to zero), and may
    // exceed max_size.
    #[test]
    fn test_truncate_output_max_size_smaller_than_marker() {
        let input: Vec<u8> = vec![b'x'; 100];
        let max_size = 10;
        let result = truncate_output(&input, max_size);

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should still contain truncation marker: {result_str:?}"
        );

        assert!(
            result_str.starts_with("\n\n..."),
            "should start with marker prefix"
        );
        assert!(
            result_str.ends_with("...\n\n"),
            "should end with marker suffix"
        );
    }

    // max_size of zero still produces a marker that accounts for every byte.
    #[test]
    fn test_truncate_output_max_size_zero() {
        let input: Vec<u8> = vec![b'x'; 50];
        let max_size = 0;

        let result = truncate_output(&input, max_size);

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated 50 bytes]"),
            "should show correct truncation count: {result_str:?}"
        );
    }

    // Dictionary compression round-trips: data compressed with the stdout
    // dictionary decompresses back to the original bytes.
    #[test]
    fn test_compress_with_dict_stdout() {
        let test_output = b"running 1 test\ntest tests::my_test ... ok\n\ntest result: ok. 1 passed; 0 failed; 0 ignored\n";

        let compressed =
            compress_with_dict(test_output, dicts::STDOUT).expect("compression failed");

        let dict = zstd::dict::DecoderDictionary::copy(dicts::STDOUT);
        let mut decoder = zstd::stream::Decoder::with_prepared_dictionary(&compressed[..], &dict)
            .expect("decoder creation failed");
        let mut decompressed = Vec::new();
        io::Read::read_to_end(&mut decoder, &mut decompressed).expect("decompression failed");

        assert_eq!(decompressed, test_output, "round-trip should preserve data");
    }
}