@ -1,16 +1,19 @@
use byteorder ::{ BigEndian , ByteOrder } ;
use crate ::scan ::ScanStatistics ;
use std ::io ::{ Read , Result } ;
use byteorder ::{ BigEndian , ByteOrder , ReadBytesExt } ;
use std ::fs ::File ;
use std ::io ::{ BufReader , BufWriter , Read , Result , Seek , SeekFrom , Write } ;
/// Size in bytes of one region-file sector: the two header tables each
/// occupy one sector, and chunk offsets/lengths are measured in this unit.
const BLOCK_SIZE: usize = 4096;
pub struct RegionFile {
pub struct RegionFile {
reader : Box < dyn Read > ,
reader : BufReader < File > ,
locations : Locations ,
locations : Locations ,
#[ allow(dead_code) ]
timestamps : Timestamps ,
timestamps : Timestamps ,
}
}
impl RegionFile {
impl RegionFile {
pub fn new ( reader : Box < dyn Read > ) -> Result < Self > {
pub fn new ( reader : BufReader < File > ) -> Result < Self > {
let mut locations_raw = [ 0 u8 ; BLOCK_SIZE ] ;
let mut locations_raw = [ 0 u8 ; BLOCK_SIZE ] ;
let mut timestamps_raw = [ 0 u8 ; BLOCK_SIZE ] ;
let mut timestamps_raw = [ 0 u8 ; BLOCK_SIZE ] ;
let mut reader = reader ;
let mut reader = reader ;
@ -24,20 +27,67 @@ impl RegionFile {
} )
} )
}
}
/// Writes a corrected version of the region file back to the disk
///
/// Only the (possibly corrected) locations table is emitted.
/// NOTE(review): after `scan_chunks` filters out invalid entries the table
/// can be shorter than the `BLOCK_SIZE` header block — confirm callers pad
/// or rewrite the rest of the file.
pub fn write(&self, writer: &mut BufWriter<File>) -> Result<()> {
    let location_bytes = self.locations.to_bytes();
    // `write_all` takes `&[u8]`; a `Vec<u8>` derefs to a slice directly, so
    // the original `&location_bytes.as_slice()` double-reference was redundant.
    writer.write_all(&location_bytes)?;
    writer.flush()
}
/// Returns the number of chunks in the file
/// Returns the number of chunks in the file
pub fn count_chunks ( & self ) -> usize {
pub fn count_chunks ( & self ) -> usize {
let mut count = 0 ;
return self . locations . valid_entries ( ) . len ( ) ;
for x in 0 .. 32 {
}
for z in 0 .. 32 {
if ! ( self . locations . get_chunk_offset ( x , z ) = = Some ( 0 )
/// Scans the chunk entries for possible errors
& & self . locations . get_chunk_sectors ( x , z ) = = Some ( 0 ) )
pub fn scan_chunks ( & mut self ) -> Result < ScanStatistics > {
{
let mut statistic = ScanStatistics ::new ( ) ;
count + = 1 ;
let entries = self . locations . valid_entries ( ) ;
let mut corrected_entries = Vec ::new ( ) ;
statistic . total_chunks = entries . len ( ) as u64 ;
for ( offset , sections ) in & entries {
self . reader
. seek ( SeekFrom ::Start ( * offset as u64 * BLOCK_SIZE as u64 ) ) ? ;
match self . read_chunk ( ) {
Ok ( chunk ) = > {
let chunk_sections = ( ( chunk . length + 4 ) as f64 / BLOCK_SIZE as f64 ) . ceil ( ) ;
if * sections ! = chunk_sections as u8 | | chunk . length > = 1_048_576 {
statistic . invalid_length + = 1 ;
corrected_entries . push ( ( * offset , chunk_sections as u8 ) ) ;
} else {
corrected_entries . push ( ( * offset , * sections ) ) ;
}
}
Err ( e ) = > {
println! ( "Failed to read chunk at {}: {}" , offset , e ) ;
}
}
}
}
}
}
self . locations . set_entries ( corrected_entries ) ;
return count ;
Ok ( statistic )
}
/// Reads a chunk at the current location
fn read_chunk ( & mut self ) -> Result < Chunk > {
let mut length_raw = [ 0 u8 ; 4 ] ;
self . reader . read_exact ( & mut length_raw ) ? ;
let length = BigEndian ::read_u32 ( & length_raw ) ;
let compression_type = self . reader . read_u8 ( ) ? ;
if length > 0 {
self . reader . seek ( SeekFrom ::Current ( ( length - 1 ) as i64 ) ) ? ;
} else {
self . reader . seek ( SeekFrom ::Current ( ( length ) as i64 ) ) ? ;
}
Ok ( Chunk {
length ,
compression_type ,
} )
}
}
}
}
@ -51,8 +101,8 @@ impl Locations {
let mut locations = Vec ::new ( ) ;
let mut locations = Vec ::new ( ) ;
for i in ( 0 .. BLOCK_SIZE - 1 ) . step_by ( 4 ) {
for i in ( 0 .. BLOCK_SIZE - 1 ) . step_by ( 4 ) {
let mut offset = BigEndian ::read_u32 ( & bytes [ i .. i + 4 ] ) ;
let offset_raw = [ 0 u8 , bytes [ i ] , bytes [ i + 1 ] , bytes [ i + 2 ] ] ;
offset = offset > > 1 ;
let offset = BigEndian ::read_u32 ( & offset_raw ) ;
let count = bytes [ i + 3 ] ;
let count = bytes [ i + 3 ] ;
locations . push ( ( offset , count ) ) ;
locations . push ( ( offset , count ) ) ;
}
}
@ -60,6 +110,20 @@ impl Locations {
Self { inner : locations }
Self { inner : locations }
}
}
/// Returns the byte representation of the locations table
pub fn to_bytes ( & self ) -> Vec < u8 > {
let mut bytes = Vec ::new ( ) ;
for ( offset , sections ) in & self . inner {
let mut offset_raw = [ 0 u8 ; 4 ] ;
BigEndian ::write_u32 ( & mut offset_raw , * offset ) ;
bytes . append ( & mut offset_raw [ 1 .. 4 ] . to_vec ( ) ) ;
bytes . push ( * sections ) ;
}
bytes
}
/// Returns the offset of a chunk
/// Returns the offset of a chunk
pub fn get_chunk_offset ( & self , x : usize , z : usize ) -> Option < u32 > {
pub fn get_chunk_offset ( & self , x : usize , z : usize ) -> Option < u32 > {
let index = x % 32 + ( z % 32 ) * 32 ;
let index = x % 32 + ( z % 32 ) * 32 ;
@ -71,6 +135,19 @@ impl Locations {
let index = x % 32 + ( z % 32 ) * 32 ;
let index = x % 32 + ( z % 32 ) * 32 ;
self . inner . get ( index ) . map ( | e | ( * e ) . 1 )
self . inner . get ( index ) . map ( | e | ( * e ) . 1 )
}
}
/// Returns chunk entry list
pub fn valid_entries ( & self ) -> Vec < ( u32 , u8 ) > {
self . inner
. iter ( )
. filter_map ( | e | if ( * e ) . 0 > = 2 { Some ( * e ) } else { None } )
. collect ( )
}
/// Replaces the entry list with a new one
///
/// NOTE(review): replacing the table with a shorter, filtered list (e.g. the
/// output of `valid_entries`) shifts entry indices, which the (x, z)-indexed
/// lookups in this type rely on — confirm callers no longer index by (x, z)
/// after calling this.
pub fn set_entries ( & mut self , entries : Vec < ( u32 , u8 ) > ) {
self . inner = entries ;
}
}
}
#[ derive(Debug) ]
#[ derive(Debug) ]
@ -89,3 +166,9 @@ impl Timestamps {
Self { inner : timestamps }
Self { inner : timestamps }
}
}
}
}
/// Header information for a single chunk as read from the region file.
// Both fields are small Copy integers, so derive the cheap value traits too.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Chunk {
    /// Byte length recorded in the chunk's 4-byte prefix (includes the
    /// compression-type byte).
    pub length: u32,
    /// The byte stored immediately after the length prefix, identifying the
    /// compression scheme.
    pub compression_type: u8,
}