diff --git a/Cargo.toml b/Cargo.toml
index 52b9e19..7065a4e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "bdflib"
-version = "0.1.4"
+version = "0.1.5"
 authors = ["trivernis "]
 edition = "2018"
 license-file = "LICENSE"
diff --git a/README.md b/README.md
index 1fb776e..bfcf472 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,53 @@
 
 This library provides methods to read and write Binary Dictionary Format files that can be used to represent rainbow tables.
 
+## Usage
+
+### Read
+
+```rust
+use bdf::io::BDFReader;
+use std::fs::File;
+use std::io::BufReader;
+
+fn main() {
+    let f = File::open("dictionary.bdf").unwrap();
+    let buf_reader = BufReader::new(f);
+    let mut bdf_reader = BDFReader::new(buf_reader);
+    // the metadata has to be read before the lookup table
+    bdf_reader.read_metadata().unwrap();
+    let lookup_table = bdf_reader.read_lookup_table().unwrap().clone();
+    while let Ok(mut next_chunk) = bdf_reader.next_chunk() {
+        if let Ok(entries) = next_chunk.data_entries(&lookup_table) {
+            println!("{:?}", entries);
+        }
+    }
+}
+```
+
+### Write
+
+```rust
+use bdf::chunks::{DataEntry, HashEntry};
+use bdf::io::BDFWriter;
+use std::fs::File;
+use std::io::BufWriter;
+
+fn main() {
+    let f = File::create("dictionary.bdf").unwrap();
+    let buf_writer = BufWriter::new(f);
+    let entry_count = 1;
+    let mut bdf_writer = BDFWriter::new(buf_writer, entry_count, false);
+    bdf_writer.add_lookup_entry(HashEntry::new("fakehash".into(), 3)).unwrap();
+    let mut entry = DataEntry::new("foo".into());
+    entry.add_hash_value("fakehash".into(), vec![0, 2, 3]);
+    bdf_writer.add_data_entry(entry).unwrap();
+    // finish() flushes the remaining chunk data and the writer
+    bdf_writer.finish().unwrap();
+    println!("Finished writing!");
+}
+```
+
 ## Binary Dictionary File Format (bdf)
 
 ```
diff --git a/src/io.rs b/src/io.rs
index 83f994c..cef0bf3 100644
--- a/src/io.rs
+++ b/src/io.rs
@@ -1,14 +1,13 @@
 use super::chunks::*;
-use std::io::{Write, BufWriter, ErrorKind, BufReader, Read};
-use std::fs::File;
-use std::collections::HashMap;
-use std::io::Error;
 use byteorder::{BigEndian, ByteOrder};
+use std::collections::HashMap;
 use std::convert::TryInto;
+use std::fs::File;
+use std::io::Error;
+use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
 
 const ENTRIES_PER_CHUNK: u32 = 100_000;
 
-
 pub struct BDFReader {
     reader: BufReader<File>,
     pub metadata: Option<MetaChunk>,
@@ -16,7 +15,6 @@ pub struct BDFReader {
     compressed: bool,
 }
 
-
 pub struct BDFWriter {
     writer: BufWriter<File>,
     metadata: MetaChunk,
@@ -28,7 +26,14 @@ pub struct BDFWriter {
 }
 
 impl BDFWriter {
-
+    /// Creates a new BDFWriter.
+    /// The `entry_count` should be the total number of entries to be written.
+    /// It is required upfront since the META chunk containing this
+    /// information is the first chunk to be written.
+    /// The entry count can be used by tools to display a progress bar
+    /// while reading.
+    /// If the `compress` parameter is true, each data chunk will be compressed
+    /// using lzma with a default level of 1.
     pub fn new(writer: BufWriter<File>, entry_count: u64, compress: bool) -> Self {
         Self {
             metadata: MetaChunk::new(entry_count, ENTRIES_PER_CHUNK, compress),
@@ -70,7 +75,6 @@ impl BDFWriter {
     }
 
     /// Writes the data to the file
-
     pub fn flush(&mut self) -> Result<(), Error> {
         if !self.head_written {
             self.writer.write(BDF_HDR)?;
@@ -98,6 +102,15 @@ impl BDFWriter {
         self.writer.flush()
     }
 
+    /// Flushes the buffered chunk data and the underlying writer,
+    /// finishing the file.
+    pub fn finish(&mut self) -> Result<(), Error> {
+        self.flush()?;
+        self.flush_writer()?;
+
+        Ok(())
+    }
+
     /// Sets the compression level for lzma compression
     pub fn set_compression_level(&mut self, level: u32) {
         self.compression_level = level;
@@ -105,18 +118,21 @@ impl BDFWriter {
 
     /// Changes the entries per chunk value.
     /// Returns an error if the metadata has already been written.
-    pub fn set_entries_per_chunk(&mut self, number: u32) -> Result<(), Error>{
+    pub fn set_entries_per_chunk(&mut self, number: u32) -> Result<(), Error> {
         if self.head_written {
-            return Err(Error::new(ErrorKind::Other, "the head has already been written"))
+            return Err(Error::new(
+                ErrorKind::Other,
+                "the head has already been written",
+            ));
         }
         self.metadata.entries_per_chunk = number;
-        self.metadata.chunk_count = (self.metadata.entry_count as f64 / number as f64).ceil() as u32;
+        self.metadata.chunk_count =
+            (self.metadata.entry_count as f64 / number as f64).ceil() as u32;
         Ok(())
     }
 }
 
 impl BDFReader {
-
     /// Creates a new BDFReader
     pub fn new(reader: BufReader<File>) -> Self {
         Self {
@@ -127,6 +143,14 @@ impl BDFReader {
         }
     }
 
+    /// Reads the metadata and lookup table
+    pub fn read_start(&mut self) -> Result<(), Error> {
+        self.read_metadata()?;
+        self.read_lookup_table()?;
+
+        Ok(())
+    }
+
     /// Verifies the header of the file and reads and stores the metadata
     pub fn read_metadata(&mut self) -> Result<&MetaChunk, Error> {
         if !self.validate_header() {
@@ -208,4 +232,4 @@ impl BDFReader {
 
         Ok(gen_chunk)
     }
-}
\ No newline at end of file
+}
diff --git a/src/lib.rs b/src/lib.rs
index 6b46f94..be857f5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,10 +2,10 @@
 mod tests {
     use super::io::BDFWriter;
-    use std::io::{BufWriter, Error, BufReader};
-    use std::fs::{File, remove_file};
-    use crate::chunks::{HashEntry, DataEntry};
+    use crate::chunks::{DataEntry, HashEntry};
     use crate::io::BDFReader;
+    use std::fs::{remove_file, File};
+    use std::io::{BufReader, BufWriter, Error};
 
     const FOO: &str = "foo";
     const BAR: &str = "bar";
 
@@ -53,8 +53,7 @@
         entry_2.add_hash_value(FOO.to_string(), vec![4, 5, 2, 3]);
         writer.add_data_entry(entry_2)?;
 
-        writer.flush()?;
-        writer.flush_writer()?;
+        writer.finish()?;
 
         remove_file("tmp2.bdf")?;
 
@@ -65,8 +64,8 @@
     fn it_reads() -> Result<(), Error> {
         create_simple_file("tmp3.bdf", false)?;
         let mut reader = new_reader("tmp3.bdf")?;
-        reader.read_metadata()?;
-        let lookup_table = &reader.read_lookup_table()?.clone();
+        reader.read_start()?;
+        let lookup_table = &reader.lookup_table.clone().unwrap();
         let mut next_chunk = reader.next_chunk()?;
         let data_entries = next_chunk.data_entries(lookup_table)?;
         assert_eq!(data_entries[0].plain, "lol".to_string());
@@ -91,7 +90,7 @@
         Ok(())
     }
 
-    fn create_simple_file(name: &str, compressed: bool) -> Result<(), Error>{
+    fn create_simple_file(name: &str, compressed: bool) -> Result<(), Error> {
         let mut writer = new_writer(name, 1, compressed)?;
 
         writer.add_lookup_entry(HashEntry::new(FOO.to_string(), 4))?;
@@ -121,4 +120,4 @@
 }
 
 pub mod chunks;
-pub mod io;
\ No newline at end of file
+pub mod io;
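This patch pairs two convenience methods: `BDFWriter::finish` replaces the `flush`/`flush_writer` sequence, and `BDFReader::read_start` replaces the `read_metadata`/`read_lookup_table` sequence. A minimal round-trip sketch of the new calls, based only on the API as it appears in this diff; the file name `round_trip.bdf` is illustrative, and direct access to `reader.lookup_table` assumes the field is public, as the updated `it_reads` test suggests:

```rust
use bdf::chunks::{DataEntry, HashEntry};
use bdf::io::{BDFReader, BDFWriter};
use std::fs::File;
use std::io::{BufReader, BufWriter, Error};

fn main() -> Result<(), Error> {
    // write a dictionary containing a single entry
    let writer = BufWriter::new(File::create("round_trip.bdf")?);
    let mut bdf_writer = BDFWriter::new(writer, 1, false);
    bdf_writer.add_lookup_entry(HashEntry::new("fakehash".into(), 3))?;
    let mut entry = DataEntry::new("foo".into());
    entry.add_hash_value("fakehash".into(), vec![0, 2, 3]);
    bdf_writer.add_data_entry(entry)?;
    // one call instead of flush() followed by flush_writer()
    bdf_writer.finish()?;

    // read the file back with the matching convenience method
    let reader = BufReader::new(File::open("round_trip.bdf")?);
    let mut bdf_reader = BDFReader::new(reader);
    // one call instead of read_metadata() followed by read_lookup_table()
    bdf_reader.read_start()?;
    // assumes lookup_table is publicly accessible, as in the updated test
    let lookup_table = bdf_reader.lookup_table.clone().unwrap();
    while let Ok(mut chunk) = bdf_reader.next_chunk() {
        if let Ok(entries) = chunk.data_entries(&lookup_table) {
            println!("{:?}", entries);
        }
    }

    Ok(())
}
```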