use super::{
    dicts,
    format::{
        CARGO_METADATA_JSON_PATH, OutputDict, RECORD_OPTS_JSON_PATH, RUN_LOG_FILE_NAME,
        STDERR_DICT_PATH, STDOUT_DICT_PATH, STORE_ZIP_FILE_NAME, TEST_LIST_JSON_PATH,
    },
    summary::{
        OutputEventKind, OutputFileName, OutputKind, RecordOpts, TestEventKindSummary,
        TestEventSummary, ZipStoreOutput,
    },
};
use crate::{
    errors::{RunStoreError, StoreWriterError},
    record::format::{RERUN_INFO_JSON_PATH, RerunInfo},
    reporter::events::{
        ChildExecutionOutputDescription, ChildOutputDescription, ExecuteStatus, ExecutionStatuses,
        SetupScriptExecuteStatus,
    },
    test_output::ChildSingleOutput,
};
use camino::{Utf8Path, Utf8PathBuf};
use countio::Counter;
use debug_ignore::DebugIgnore;
use nextest_metadata::TestListSummary;
use std::{
    borrow::Cow,
    collections::HashSet,
    fs::File,
    io::{self, Write},
};
use zip::{CompressionMethod, ZipWriter};

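/// Streaming encoder for the run log.
///
/// Counts uncompressed bytes as they are written, then zstd-compresses them
/// into a byte-counted file, so that `finish` can report both sizes. The
/// encoder lives in an `Option` so it can be moved out in `finish`, with
/// `Drop` providing a best-effort fallback.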
struct LogEncoder {
    inner: Option<Counter<zstd::stream::Encoder<'static, Counter<File>>>>,
}

impl std::fmt::Debug for LogEncoder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LogEncoder").finish_non_exhaustive()
    }
}

impl LogEncoder {
    fn new(encoder: zstd::stream::Encoder<'static, Counter<File>>) -> Self {
        Self {
            inner: Some(Counter::new(encoder)),
        }
    }

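    /// Finishes the log, returning its compressed and uncompressed sizes
    /// along with the number of entries written.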
    fn finish(mut self, entries: u64) -> io::Result<ComponentSizes> {
        let counter = self.inner.take().expect("encoder already finished");
        let uncompressed = counter.writer_bytes() as u64;
        let file_counter = counter.into_inner().finish()?;
        let compressed = file_counter.writer_bytes() as u64;
        Ok(ComponentSizes {
            compressed,
            uncompressed,
            entries,
        })
    }
}

impl Write for LogEncoder {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner
            .as_mut()
            .expect("encoder already finished")
            .write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner
            .as_mut()
            .expect("encoder already finished")
            .flush()
    }
}

impl Drop for LogEncoder {
    fn drop(&mut self) {
        if let Some(counter) = self.inner.take() {
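            // Best-effort finish: errors cannot be propagated out of `drop`,
            // so they are deliberately ignored.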
            let _ = counter.into_inner().finish();
        }
    }
}

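/// Records a test run to disk: captured outputs go into a zip-based store
/// archive, and per-event summaries are appended to a zstd-compressed,
/// newline-delimited JSON run log.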
#[derive(Debug)]
pub struct RunRecorder {
    store_path: Utf8PathBuf,
    store_writer: StoreWriter,
    log_path: Utf8PathBuf,
    log: DebugIgnore<LogEncoder>,
    log_entries: u64,
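    /// Maximum number of bytes of a single captured output to store before
    /// truncation kicks in.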
    max_output_size: usize,
}

impl RunRecorder {
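    /// Creates a new recorder, setting up the store archive and the run log
    /// inside `run_dir`.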
    pub(super) fn new(
        run_dir: Utf8PathBuf,
        max_output_size: bytesize::ByteSize,
    ) -> Result<Self, RunStoreError> {
        std::fs::create_dir_all(&run_dir).map_err(|error| RunStoreError::RunDirCreate {
            run_dir: run_dir.clone(),
            error,
        })?;

        let store_path = run_dir.join(STORE_ZIP_FILE_NAME);
        let store_writer =
            StoreWriter::new(&store_path).map_err(|error| RunStoreError::StoreWrite {
                store_path: store_path.clone(),
                error,
            })?;

        let log_path = run_dir.join(RUN_LOG_FILE_NAME);
        let file = std::fs::OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(&log_path)
            .map_err(|error| RunStoreError::RunLogCreate {
                path: log_path.clone(),
                error,
            })?;

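        // Compression level 3 is zstd's default: a reasonable speed/ratio
        // tradeoff for a streaming log.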
        let encoder = zstd::stream::Encoder::new(Counter::new(file), 3).map_err(|error| {
            RunStoreError::RunLogCreate {
                path: log_path.clone(),
                error,
            }
        })?;
        let log = LogEncoder::new(encoder);

        Ok(Self {
            store_path,
            store_writer,
            log_path,
            log: DebugIgnore(log),
            log_entries: 0,
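            // Clamp to usize::MAX if the configured size doesn't fit in
            // usize (possible on 32-bit targets).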
            max_output_size: usize::try_from(max_output_size.as_u64()).unwrap_or(usize::MAX),
        })
    }

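    /// Writes run-level metadata to the store archive: the test list, Cargo
    /// metadata, record options, and the zstd dictionaries used to compress
    /// captured outputs.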
    pub(crate) fn write_meta(
        &mut self,
        cargo_metadata_json: &str,
        test_list: &TestListSummary,
        opts: &RecordOpts,
    ) -> Result<(), RunStoreError> {
        let test_list_json = serde_json::to_string(test_list)
            .map_err(|error| RunStoreError::TestListSerialize { error })?;

        let opts_json = serde_json::to_string(opts)
            .map_err(|error| RunStoreError::RecordOptionsSerialize { error })?;

        self.write_archive_file(TEST_LIST_JSON_PATH, test_list_json.as_bytes())?;
        self.write_archive_file(CARGO_METADATA_JSON_PATH, cargo_metadata_json.as_bytes())?;
        self.write_archive_file(RECORD_OPTS_JSON_PATH, opts_json.as_bytes())?;

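        // Store the dictionaries themselves so that archived outputs can be
        // decompressed later without relying on the dictionaries compiled
        // into this binary.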
        self.write_archive_file(STDOUT_DICT_PATH, dicts::STDOUT)?;
        self.write_archive_file(STDERR_DICT_PATH, dicts::STDERR)?;

        Ok(())
    }

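    /// Writes rerun metadata to the store archive.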
    pub(crate) fn write_rerun_info(&mut self, rerun_info: &RerunInfo) -> Result<(), RunStoreError> {
        let rerun_info_json = serde_json::to_string(rerun_info)
            .map_err(|error| RunStoreError::RerunInfoSerialize { error })?;

        self.write_archive_file(RERUN_INFO_JSON_PATH, rerun_info_json.as_bytes())?;

        Ok(())
    }

    fn write_archive_file(&mut self, path: &str, bytes: &[u8]) -> Result<(), RunStoreError> {
        self.store_writer
            .add_file(Utf8PathBuf::from(path), bytes)
            .map_err(|error| RunStoreError::StoreWrite {
                store_path: self.store_path.clone(),
                error,
            })
    }

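    /// Records a single test event: captured outputs are spilled into the
    /// store archive, and the remaining summary is appended to the run log
    /// as one line of JSON.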
    pub(crate) fn write_event(
        &mut self,
        event: TestEventSummary<ChildSingleOutput>,
    ) -> Result<(), RunStoreError> {
        let mut cx = SerializeTestEventContext {
            store_writer: &mut self.store_writer,
            max_output_size: self.max_output_size,
        };

        let event = cx
            .convert_event(event)
            .map_err(|error| RunStoreError::StoreWrite {
                store_path: self.store_path.clone(),
                error,
            })?;

        let json = serde_json::to_string(&event)
            .map_err(|error| RunStoreError::TestEventSerialize { error })?;
        self.write_log_impl(json.as_bytes())?;
        self.write_log_impl(b"\n")?;

        self.log_entries += 1;

        Ok(())
    }

    fn write_log_impl(&mut self, bytes: &[u8]) -> Result<(), RunStoreError> {
        self.log
            .write_all(bytes)
            .map_err(|error| RunStoreError::RunLogWrite {
                path: self.log_path.clone(),
                error,
            })
    }

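    /// Finishes both the run log and the store archive, returning their
    /// final sizes.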
    pub(crate) fn finish(self) -> Result<StoreSizes, RunStoreError> {
        let log_sizes =
            self.log
                .0
                .finish(self.log_entries)
                .map_err(|error| RunStoreError::RunLogFlush {
                    path: self.log_path.clone(),
                    error,
                })?;

        let store_sizes =
            self.store_writer
                .finish()
                .map_err(|error| RunStoreError::StoreWrite {
                    store_path: self.store_path.clone(),
                    error,
                })?;

        Ok(StoreSizes {
            log: log_sizes,
            store: store_sizes,
        })
    }
}

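/// Writer for the store archive: a zip file whose entries are deduplicated
/// by path and compressed with zstd, using a per-kind dictionary where one
/// is available.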
#[derive(Debug)]
pub(crate) struct StoreWriter {
    writer: DebugIgnore<ZipWriter<Counter<File>>>,
    added_files: HashSet<Utf8PathBuf>,
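    /// Total size of all added contents before compression.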
    uncompressed_size: u64,
}

impl StoreWriter {
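    /// Creates a store writer at `store_path`, truncating any existing file.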
    fn new(store_path: &Utf8Path) -> Result<Self, StoreWriterError> {
        let zip_file = std::fs::OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(store_path)
            .map_err(|error| StoreWriterError::Create { error })?;
        let writer = ZipWriter::new(Counter::new(zip_file));

        Ok(Self {
            writer: DebugIgnore(writer),
            added_files: HashSet::new(),
            uncompressed_size: 0,
        })
    }

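    /// Adds a file to the archive, skipping paths that have already been
    /// added.
    ///
    /// Contents with a matching dictionary are zstd-compressed up front and
    /// stored raw in the zip; everything else goes through the zip writer's
    /// own zstd compression.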
    fn add_file(&mut self, path: Utf8PathBuf, contents: &[u8]) -> Result<(), StoreWriterError> {
        if self.added_files.contains(&path) {
            return Ok(());
        }

        self.uncompressed_size += contents.len() as u64;

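        // If a dictionary is registered for this path, compress with it:
        // dictionaries help compress many small, similar outputs.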
        let dict = OutputDict::for_path(&path);
        match dict.dict_bytes() {
            Some(dict_bytes) => {
                let compressed = compress_with_dict(contents, dict_bytes)
                    .map_err(|error| StoreWriterError::Compress { error })?;

                let options = zip::write::FileOptions::<'_, ()>::default()
                    .compression_method(CompressionMethod::Stored);
                self.writer
                    .start_file(path.as_str(), options)
                    .map_err(|error| StoreWriterError::StartFile {
                        path: path.clone(),
                        error,
                    })?;
                self.writer
                    .write_all(&compressed)
                    .map_err(|error| StoreWriterError::Write {
                        path: path.clone(),
                        error,
                    })?;
            }
            None => {
                let options = zip::write::FileOptions::<'_, ()>::default()
                    .compression_method(CompressionMethod::Zstd);
                self.writer
                    .start_file(path.as_str(), options)
                    .map_err(|error| StoreWriterError::StartFile {
                        path: path.clone(),
                        error,
                    })?;
                self.writer
                    .write_all(contents)
                    .map_err(|error| StoreWriterError::Write {
                        path: path.clone(),
                        error,
                    })?;
            }
        }

        self.added_files.insert(path);

        Ok(())
    }

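    /// Finishes the zip archive and returns its sizes.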
    fn finish(self) -> Result<ComponentSizes, StoreWriterError> {
        let entries = self.added_files.len() as u64;
        let mut counter = self
            .writer
            .0
            .finish()
            .map_err(|error| StoreWriterError::Finish { error })?;

        counter
            .flush()
            .map_err(|error| StoreWriterError::Flush { error })?;

        Ok(ComponentSizes {
            compressed: counter.writer_bytes() as u64,
            uncompressed: self.uncompressed_size,
            entries,
        })
    }
}

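/// Sizes recorded for a single component (the run log or the store archive).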
#[derive(Clone, Copy, Debug, Default)]
pub struct ComponentSizes {
    /// Size on disk, after compression.
    pub compressed: u64,
    /// Size of the same data before compression.
    pub uncompressed: u64,
    /// Number of entries: log lines for the run log, files for the store
    /// archive.
    pub entries: u64,
}

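/// Combined sizes for everything written by a [`RunRecorder`].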
#[derive(Clone, Copy, Debug, Default)]
pub struct StoreSizes {
    /// Sizes for the run log.
    pub log: ComponentSizes,
    /// Sizes for the store archive.
    pub store: ComponentSizes,
}

impl StoreSizes {
    /// Returns the total compressed size of the run log and the store.
    pub fn total_compressed(&self) -> u64 {
        self.log.compressed + self.store.compressed
    }

    /// Returns the total uncompressed size of the run log and the store.
    pub fn total_uncompressed(&self) -> u64 {
        self.log.uncompressed + self.store.uncompressed
    }
}

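/// Compresses `data` with zstd using the provided dictionary. The
/// compression level (3) is baked into the prepared dictionary.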
fn compress_with_dict(data: &[u8], dict_bytes: &[u8]) -> io::Result<Vec<u8>> {
    let dict = zstd::dict::EncoderDictionary::copy(dict_bytes, 3);
    let mut encoder = zstd::stream::Encoder::with_prepared_dictionary(Vec::new(), &dict)?;
    encoder.write_all(data)?;
    encoder.finish()
}

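/// Context for converting in-memory test events, which own their captured
/// output, into the stored form, where outputs are references to files
/// written to the store archive.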
struct SerializeTestEventContext<'a> {
    store_writer: &'a mut StoreWriter,
    max_output_size: usize,
}

impl SerializeTestEventContext<'_> {
    fn convert_event(
        &mut self,
        event: TestEventSummary<ChildSingleOutput>,
    ) -> Result<TestEventSummary<ZipStoreOutput>, StoreWriterError> {
        Ok(TestEventSummary {
            timestamp: event.timestamp,
            elapsed: event.elapsed,
            kind: self.convert_event_kind(event.kind)?,
        })
    }

    fn convert_event_kind(
        &mut self,
        kind: TestEventKindSummary<ChildSingleOutput>,
    ) -> Result<TestEventKindSummary<ZipStoreOutput>, StoreWriterError> {
        match kind {
            TestEventKindSummary::Core(core) => Ok(TestEventKindSummary::Core(core)),
            TestEventKindSummary::Output(output) => Ok(TestEventKindSummary::Output(
                self.convert_output_event(output)?,
            )),
        }
    }

    fn convert_output_event(
        &mut self,
        event: OutputEventKind<ChildSingleOutput>,
    ) -> Result<OutputEventKind<ZipStoreOutput>, StoreWriterError> {
        match event {
            OutputEventKind::SetupScriptFinished {
                stress_index,
                index,
                total,
                script_id,
                program,
                args,
                no_capture,
                run_status,
            } => {
                let run_status = self.convert_setup_script_status(&run_status)?;
                Ok(OutputEventKind::SetupScriptFinished {
                    stress_index,
                    index,
                    total,
                    script_id,
                    program,
                    args,
                    no_capture,
                    run_status,
                })
            }
            OutputEventKind::TestAttemptFailedWillRetry {
                stress_index,
                test_instance,
                run_status,
                delay_before_next_attempt,
                failure_output,
                running,
            } => {
                let run_status = self.convert_execute_status(run_status)?;
                Ok(OutputEventKind::TestAttemptFailedWillRetry {
                    stress_index,
                    test_instance,
                    run_status,
                    delay_before_next_attempt,
                    failure_output,
                    running,
                })
            }
            OutputEventKind::TestFinished {
                stress_index,
                test_instance,
                success_output,
                failure_output,
                junit_store_success_output,
                junit_store_failure_output,
                run_statuses,
                current_stats,
                running,
            } => {
                let run_statuses = self.convert_execution_statuses(run_statuses)?;
                Ok(OutputEventKind::TestFinished {
                    stress_index,
                    test_instance,
                    success_output,
                    failure_output,
                    junit_store_success_output,
                    junit_store_failure_output,
                    run_statuses,
                    current_stats,
                    running,
                })
            }
        }
    }

    fn convert_setup_script_status(
        &mut self,
        status: &SetupScriptExecuteStatus<ChildSingleOutput>,
    ) -> Result<SetupScriptExecuteStatus<ZipStoreOutput>, StoreWriterError> {
        Ok(SetupScriptExecuteStatus {
            output: self.convert_child_execution_output(&status.output)?,
            result: status.result.clone(),
            start_time: status.start_time,
            time_taken: status.time_taken,
            is_slow: status.is_slow,
            env_map: status.env_map.clone(),
            error_summary: status.error_summary.clone(),
        })
    }

    fn convert_execution_statuses(
        &mut self,
        statuses: ExecutionStatuses<ChildSingleOutput>,
    ) -> Result<ExecutionStatuses<ZipStoreOutput>, StoreWriterError> {
        let statuses = statuses
            .into_iter()
            .map(|status| self.convert_execute_status(status))
            .collect::<Result<Vec<_>, _>>()?;
        Ok(ExecutionStatuses::new(statuses))
    }

    fn convert_execute_status(
        &mut self,
        status: ExecuteStatus<ChildSingleOutput>,
    ) -> Result<ExecuteStatus<ZipStoreOutput>, StoreWriterError> {
        let output = self.convert_child_execution_output(&status.output)?;

        Ok(ExecuteStatus {
            retry_data: status.retry_data,
            output,
            result: status.result,
            start_time: status.start_time,
            time_taken: status.time_taken,
            is_slow: status.is_slow,
            delay_before_start: status.delay_before_start,
            error_summary: status.error_summary,
            output_error_slice: status.output_error_slice,
        })
    }

    fn convert_child_execution_output(
        &mut self,
        output: &ChildExecutionOutputDescription<ChildSingleOutput>,
    ) -> Result<ChildExecutionOutputDescription<ZipStoreOutput>, StoreWriterError> {
        match output {
            ChildExecutionOutputDescription::Output {
                result,
                output,
                errors,
            } => {
                let output = self.convert_child_output(output)?;
                Ok(ChildExecutionOutputDescription::Output {
                    result: result.clone(),
                    output,
                    errors: errors.clone(),
                })
            }
            ChildExecutionOutputDescription::StartError(err) => {
                Ok(ChildExecutionOutputDescription::StartError(err.clone()))
            }
        }
    }

    fn convert_child_output(
        &mut self,
        output: &ChildOutputDescription<ChildSingleOutput>,
    ) -> Result<ChildOutputDescription<ZipStoreOutput>, StoreWriterError> {
        match output {
            ChildOutputDescription::Split { stdout, stderr } => Ok(ChildOutputDescription::Split {
                stdout: Some(self.write_single_output(stdout.as_ref(), OutputKind::Stdout)?),
                stderr: Some(self.write_single_output(stderr.as_ref(), OutputKind::Stderr)?),
            }),
            ChildOutputDescription::Combined { output } => Ok(ChildOutputDescription::Combined {
                output: self.write_single_output(Some(output), OutputKind::Combined)?,
            }),
        }
    }

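    /// Writes a single captured output to the store archive, truncating it
    /// first if it exceeds `max_output_size`, and returns a reference to the
    /// stored file (or `Empty` if there is nothing to store).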
    fn write_single_output(
        &mut self,
        output: Option<&ChildSingleOutput>,
        kind: OutputKind,
    ) -> Result<ZipStoreOutput, StoreWriterError> {
        let Some(output) = output else {
            return Ok(ZipStoreOutput::Empty);
        };

        if output.buf.is_empty() {
            return Ok(ZipStoreOutput::Empty);
        }

        let original_len = output.buf.len();
        let (data, truncated): (Cow<'_, [u8]>, bool) = if original_len <= self.max_output_size {
            (Cow::Borrowed(&output.buf), false)
        } else {
            (truncate_output(&output.buf, self.max_output_size), true)
        };

        let file_name = OutputFileName::from_content(&data, kind);
        let file_path = Utf8PathBuf::from(format!("out/{file_name}"));

        self.store_writer.add_file(file_path, &data)?;

        if truncated {
            Ok(ZipStoreOutput::Truncated {
                file_name,
                original_size: original_len as u64,
            })
        } else {
            Ok(ZipStoreOutput::Full { file_name })
        }
    }
}

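/// Truncates `buf` to fit within `max_size` bytes by keeping the head and
/// tail and splicing a `... [truncated N bytes] ...` marker in between,
/// where N is the amount by which the input exceeded `max_size`.
///
/// If `max_size` is smaller than the marker itself, the result is just the
/// marker, so the output can slightly exceed `max_size` in that edge case.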
fn truncate_output(buf: &[u8], max_size: usize) -> Cow<'_, [u8]> {
    if buf.len() <= max_size {
        return Cow::Borrowed(buf);
    }

    let truncated_bytes = buf.len() - max_size;
    let marker = format!("\n\n... [truncated {truncated_bytes} bytes] ...\n\n");
    let marker_bytes = marker.as_bytes();

    let content_space = max_size.saturating_sub(marker_bytes.len());
    let head_size = content_space / 2;
    let tail_size = content_space - head_size;

    let mut result = Vec::with_capacity(max_size);
    result.extend_from_slice(&buf[..head_size]);
    result.extend_from_slice(marker_bytes);
    result.extend_from_slice(&buf[buf.len() - tail_size..]);

    Cow::Owned(result)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::record::dicts;

    #[test]
    fn test_truncate_output_no_truncation_needed() {
        let input = b"hello world";
        let result = truncate_output(input, 100);
        assert_eq!(&*result, input);
        assert!(matches!(result, Cow::Borrowed(_)), "should be borrowed");
    }

    #[test]
    fn test_truncate_output_exact_size() {
        let input = b"exactly100bytes";
        let result = truncate_output(input, input.len());
        assert_eq!(&*result, input);
        assert!(matches!(result, Cow::Borrowed(_)), "should be borrowed");
    }

    #[test]
    fn test_truncate_output_basic() {
        let input: Vec<u8> = (0..200).collect();
        let max_size = 100;

        let result = truncate_output(&input, max_size);

        assert!(matches!(result, Cow::Owned(_)), "should be owned");

        assert!(
            result.len() <= max_size,
            "result len {} should be <= max_size {}",
            result.len(),
            max_size
        );

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should contain truncation marker: {result_str:?}"
        );
        assert!(
            result_str.contains("bytes]"),
            "should contain 'bytes]': {result_str:?}"
        );

        assert!(
            result.starts_with(&[0, 1, 2]),
            "should start with beginning of input"
        );

        assert!(
            result.ends_with(&[197, 198, 199]),
            "should end with end of input"
        );
    }

    #[test]
    fn test_truncate_output_preserves_head_and_tail() {
        let head = b"HEAD_CONTENT_";
        let middle = vec![b'x'; 1000];
        let tail = b"_TAIL_CONTENT";

        let mut input = Vec::new();
        input.extend_from_slice(head);
        input.extend_from_slice(&middle);
        input.extend_from_slice(tail);

        let max_size = 200;
        let result = truncate_output(&input, max_size);

        assert!(result.len() <= max_size);

        assert!(
            result.starts_with(b"HEAD"),
            "should preserve head: {:?}",
            String::from_utf8_lossy(&result[..20])
        );

        assert!(
            result.ends_with(b"CONTENT"),
            "should preserve tail: {:?}",
            String::from_utf8_lossy(&result[result.len() - 20..])
        );
    }

    #[test]
    fn test_truncate_output_marker_shows_correct_count() {
        let input: Vec<u8> = vec![b'a'; 1000];
        let max_size = 100;

        let result = truncate_output(&input, max_size);
        let result_str = String::from_utf8_lossy(&result);

        assert!(
            result_str.contains("[truncated 900 bytes]"),
            "should show correct truncation count: {result_str:?}"
        );
    }

    #[test]
    fn test_truncate_output_large_input() {
        let input: Vec<u8> = vec![b'x'; 20 * 1024 * 1024];
        let max_size = 10 * 1024 * 1024;
        let result = truncate_output(&input, max_size);

        assert!(
            result.len() <= max_size,
            "result {} should be <= max_size {}",
            result.len(),
            max_size
        );

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should contain truncation marker"
        );
    }

    #[test]
    fn test_truncate_output_max_size_smaller_than_marker() {
        let input: Vec<u8> = vec![b'x'; 100];
        let max_size = 10;
        let result = truncate_output(&input, max_size);

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated"),
            "should still contain truncation marker: {result_str:?}"
        );

        assert!(
            result_str.starts_with("\n\n..."),
            "should start with marker prefix"
        );
        assert!(
            result_str.ends_with("...\n\n"),
            "should end with marker suffix"
        );
    }

    #[test]
    fn test_truncate_output_max_size_zero() {
        let input: Vec<u8> = vec![b'x'; 50];
        let max_size = 0;

        let result = truncate_output(&input, max_size);

        let result_str = String::from_utf8_lossy(&result);
        assert!(
            result_str.contains("[truncated 50 bytes]"),
            "should show correct truncation count: {result_str:?}"
        );
    }

    #[test]
    fn test_compress_with_dict_stdout() {
        let test_output = b"running 1 test\ntest tests::my_test ... ok\n\ntest result: ok. 1 passed; 0 failed; 0 ignored\n";

        let compressed =
            compress_with_dict(test_output, dicts::STDOUT).expect("compression failed");

        let dict = zstd::dict::DecoderDictionary::copy(dicts::STDOUT);
        let mut decoder = zstd::stream::Decoder::with_prepared_dictionary(&compressed[..], &dict)
            .expect("decoder creation failed");
        let mut decompressed = Vec::new();
        io::Read::read_to_end(&mut decoder, &mut decompressed).expect("decompression failed");

        assert_eq!(decompressed, test_output, "round-trip should preserve data");
    }
}