diff --git a/src/chunks.rs b/src/chunks.rs
index 7596ef2..38d2fa6 100644
--- a/src/chunks.rs
+++ b/src/chunks.rs
@@ -27,7 +27,7 @@ pub struct GenericChunk {
 #[derive(Debug, Clone)]
 pub struct MetaChunk {
     pub chunk_count: u32,
-    entries_per_chunk: u32,
+    pub entries_per_chunk: u32,
     pub entry_count: u64,
     pub compression_method: Option<String>,
 }
diff --git a/src/io.rs b/src/io.rs
index ce53cb0..e51d63d 100644
--- a/src/io.rs
+++ b/src/io.rs
@@ -62,7 +62,7 @@ impl BDFWriter {
     /// the data will be written to the file
     pub fn add_data_entry(&mut self, data_entry: DataEntry) -> Result<(), Error> {
         self.data_entries.push(data_entry);
-        if self.data_entries.len() >= ENTRIES_PER_CHUNK as usize {
+        if self.data_entries.len() >= self.metadata.entries_per_chunk as usize {
             self.flush()?;
         }
 
@@ -102,6 +102,17 @@ impl BDFWriter {
     pub fn set_compression_level(&mut self, level: u32) {
         self.compression_level = level;
     }
+
+    /// Changes the entries per chunk value.
+    /// Returns an error if the metadata has already been written.
+    pub fn set_entries_per_chunk(&mut self, number: u32) -> Result<(), Error>{
+        if self.head_written {
+            return Err(Error::new(ErrorKind::Other, "the head has already been written"))
+        }
+        self.metadata.entries_per_chunk = number;
+
+        Ok(())
+    }
 }
 
 impl BDFReader {
diff --git a/src/lib.rs b/src/lib.rs
index 1250cef..6b46f94 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,6 +13,7 @@ mod tests {
 
     #[test]
     fn it_writes_uncompressed() -> Result<(), Error> {
         let mut writer = new_writer("tmp1.bdf", 2, false)?;
+        writer.set_entries_per_chunk(24)?;
         writer.add_lookup_entry(HashEntry::new(BAR.to_string(), 5))?;
         writer.add_lookup_entry(HashEntry::new(FOO.to_string(), 4))?;