finish fixed idbas & start b-tree directories

This commit is contained in:
husky 2023-09-10 19:09:43 -07:00
parent bdd7ba3f36
commit 127f78ce9e
No known key found for this signature in database
GPG key ID: 6B3D8CB511646891
3 changed files with 508 additions and 230 deletions

View file

@ -1,182 +1,215 @@
use alloc::string::{String, ToString};
use alloc::vec;
use alloc::vec::Vec;
use ondisk_btree::{BTree, FromBytes, SizeOf, ToBytes};
use vapfs::{BlockDevice, Index};
use crate::crc32;
use crate::structs::Superblock;
/// # BTd
/// min-degree of all BTrees
/// NOTE(review): name violates the Rust SCREAMING_SNAKE_CASE convention for
/// consts (would be `BT_D`); kept as-is since other code may reference it.
pub const BTd: u32 = 3;
/// # BTKeyType
/// key used to index a BTree
/// (in this file it is always a crc32 hash of a filename — see how
/// `Directory::new_entry` / `Directory::find` produce keys)
pub type BTKeyType = u32;
/// # BTKey
/// whole value of a key in a BTree
///
/// Stored on disk in big-endian form (see the `convert_*` methods) and
/// immediately followed by the filename bytes.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct BTKey {
    /// the search key — presumably crc32(filename), matching `Directory`'s
    /// keying scheme; TODO confirm
    pub key: BTKeyType,
    /// length of this entry in bytes
    pub length: u32,
    /// amount of files represented by this key, used in the case of hash collisions
    pub count: u32,
    /// if count is 1, this is the inode number of the file represented by this key
    /// if count is >1, offset from first BTNode in bytes to an HCList struct
    pub value: Index,
    /// length of filename
    /// zero if count is >1
    pub filename_length: u32,
    // rest of the struct is the filename
}
/// # HCList
/// list of files represented by a single key in a BTree, used in the case of hash collisions
#[repr(C)]
#[derive(Copy, Clone)]
pub struct HCList {
/// offset from first HCList in bytes to the next HCList
pub next: Index,
/// inode number of the file represented by this HCList
/// # DirectoryListing
/// an entry in a list of all files and directories sharing a common crc32 hash collision (unlikely to happen but just in case!)
#[derive(Debug, Clone)]
pub struct DirectoryListing {
pub name: String,
pub inode: Index,
/// length of filename
pub filename_length: u32,
// rest of the struct is the filename
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct BTNode {
/// offset from first BTNode in bytes, each entry is a DirectoryKey
pub key_array: Index,
pub key_count: u32,
/// offset from first BTNode in bytes, each entry is another BTNode
pub child_array: Index,
pub child_count: u32,
/// # BTreeEntry
/// the aforementioned list, stored in a BTree
#[derive(Debug, Clone, Default)]
pub struct BTreeEntry {
pub crc32: u32,
pub entries: Vec<DirectoryListing>,
}
impl BTKey {
/// in-place conversion from the storage representation (big endian) to the
/// native representation
pub fn convert_big_endian_to_native(&mut self) {
    // On a big-endian target the stored and native forms already agree, so
    // the whole body compiles away. On little-endian targets `from_be` is
    // exactly a byte swap, written here as `swap_bytes`.
    #[cfg(target_endian = "little")]
    {
        self.key = self.key.swap_bytes();
        self.length = self.length.swap_bytes();
        self.count = self.count.swap_bytes();
        self.value = self.value.swap_bytes();
        self.filename_length = self.filename_length.swap_bytes();
    }
}
/// in-place conversion from the native representation to the storage
/// representation (big endian)
pub fn convert_native_to_big_endian(&mut self) {
    // No-op on big-endian targets; on little-endian targets `to_be` is a
    // plain byte swap, so `swap_bytes` is equivalent under this cfg.
    #[cfg(target_endian = "little")]
    {
        self.key = self.key.swap_bytes();
        self.length = self.length.swap_bytes();
        self.count = self.count.swap_bytes();
        self.value = self.value.swap_bytes();
        self.filename_length = self.filename_length.swap_bytes();
    }
}
/// Reads a `BTKey` and its trailing filename from the block device.
/// `offset` is from 0x0 in the block device to the beginning of the struct.
/// Returns `None` if fewer than `size_of::<BTKey>()` bytes could be read.
pub fn read(offset: Index, bd: &mut dyn BlockDevice) -> Option<(Self, String)> {
    let mut buf: [u8; core::mem::size_of::<Self>()] = [0; core::mem::size_of::<Self>()];
    bd.seek(offset);
    let read_count = bd.read_blocks(&mut buf);
    if read_count < core::mem::size_of::<Self>() {
        return None;
    }
    // SAFETY: `buf` is exactly `size_of::<Self>()` bytes and `Self` is
    // `#[repr(C)]` + `Copy`. `read_unaligned` (not `read`) is required:
    // a `[u8; N]` buffer carries no alignment guarantee for `Self`, and an
    // aligned `ptr::read` through the cast pointer would be UB.
    let mut key = unsafe { core::ptr::read_unaligned(buf.as_ptr() as *const Self) };
    key.convert_big_endian_to_native();
    let filename = key.read_filename(offset, bd);
    Some((key, filename))
}
/// offset is from 0x0 in the block device to the beginning of the struct
pub fn read_filename(&self, offset: Index, bd: &mut dyn BlockDevice) -> String {
let mut filename_buf: Vec<u8> = Vec::new();
filename_buf.resize(self.filename_length as usize, 0);
bd.seek(offset);
bd.read_blocks(&mut filename_buf);
String::from_utf8_lossy(unsafe { core::slice::from_raw_parts(filename_buf.as_ptr(), filename_buf.len()) }).to_string()
}
/// # Directory
/// a directory, containing a list of all files and directories in it
#[derive(Clone)]
pub struct Directory {
    /// flat copy of every listing; used in the case of b-tree corruption,
    /// to restore the b-tree (see `rebuild`)
    pub backup_entries: Vec<DirectoryListing>,
    /// a crc32 hash of the b-tree
    /// NOTE(review): nothing in this file computes or verifies this hash —
    /// confirm it is maintained by the caller.
    pub crc32: u32,
    /// the b-tree, keyed by crc32(filename)
    pub btree: BTree<BTreeEntry>,
}
impl BTNode {
pub const fn new(key_array: Index, key_count: u32, child_array: Index, child_count: u32) -> Self {
Self {
key_array,
key_count,
child_array,
child_count,
impl Directory {
/// Reads the directory into memory from the given bytes
/// (thin convenience wrapper over the `FromBytes` impl; panics on
/// malformed input, since `FromBytes` has no error channel)
pub fn open(bytes: &[u8]) -> Self {
    Self::from_bytes(bytes)
}
/// Converts the directory back into bytes for writing to disk
/// (thin convenience wrapper over the `ToBytes` impl)
pub fn write(&self) -> Vec<u8> {
    self.to_bytes()
}
/// Adds a new entry to the directory listing.
///
/// Entries are keyed by crc32(name); if that hash is already present (a
/// collision, or a re-added name) the listing is appended to the existing
/// bucket, otherwise a fresh bucket is inserted.
pub fn new_entry(&mut self, name: &str, inode: Index) {
    let crc32 = crc32::crc32(name.as_bytes());
    let listing = DirectoryListing { name: name.to_string(), inode };
    // Fix: keep `backup_entries` in sync with the tree — it is what
    // `to_bytes` serializes and what `rebuild` restores from; previously
    // new entries were never recorded there, so they were lost on
    // serialization or rebuild.
    self.backup_entries.push(listing.clone());
    // check if this crc32 hash already exists
    let index = self.btree.search(crc32);
    if let Some(node) = index {
        if let Some(key) = node.keys.iter_mut().find(|key| key.0 == crc32) {
            // if it does, add the new entry to the existing list
            key.1.entries.push(listing);
        }
    } else {
        // if it doesn't, create a new list
        self.btree.insert(crc32, BTreeEntry { crc32, entries: vec![listing] });
    }
}
/// in-place conversion from the storage representation (big endian) to the native representation
pub fn convert_big_endian_to_native(&mut self) {
#[cfg(target_endian = "little")]
{
self.key_array = u64::from_be(self.key_array);
self.key_count = u32::from_be(self.key_count);
self.child_array = u64::from_be(self.child_array);
self.child_count = u32::from_be(self.child_count);
/// Returns the inode for the given entry name, or `None` if no entry with
/// that exact name exists — a crc32 match alone is not enough, the names
/// inside the matching bucket are compared too.
pub fn find(&mut self, name: &str) -> Option<Index> {
    let crc32 = crc32::crc32(name.as_bytes());
    let node = self.btree.search(crc32)?;
    // Fix: the previous code called `.unwrap()` on the by-name lookup,
    // which panicked when the crc32 bucket existed but held no entry with
    // this exact name (i.e. a hash collision with a different file).
    node.keys
        .iter()
        .find(|key| key.0 == crc32)
        .and_then(|key| key.1.entries.iter().find(|entry| entry.name == name))
        .map(|entry| entry.inode)
}
/// in-place conversion from the native representation to the storage
/// representation (big endian)
/// (on big-endian targets the body is compiled out entirely)
pub fn convert_native_to_big_endian(&mut self) {
    #[cfg(target_endian = "little")]
    {
        self.key_array = u64::to_be(self.key_array);
        self.key_count = u32::to_be(self.key_count);
        self.child_array = u64::to_be(self.child_array);
        self.child_count = u32::to_be(self.child_count);
    }
}
/// Reads a `BTNode` from the block device.
/// `offset` is from `root` in bytes; `root` is the absolute device offset
/// of the first `BTNode`.
/// Returns `None` if fewer than `size_of::<BTNode>()` bytes could be read.
pub fn read(root: Index, offset: Index, bd: &mut dyn BlockDevice) -> Option<Self> {
    let mut buf: [u8; core::mem::size_of::<Self>()] = [0; core::mem::size_of::<Self>()];
    bd.seek(root + offset);
    let read_count = bd.read_blocks(&mut buf);
    if read_count < core::mem::size_of::<Self>() {
        return None;
    }
    // SAFETY: `buf` is exactly `size_of::<Self>()` bytes and `Self` is
    // `#[repr(C)]` + `Copy`. `read_unaligned` is required because a
    // `[u8; N]` buffer gives no alignment guarantee for `Self`; the
    // previous aligned `ptr::read` was UB on an unaligned buffer.
    let mut node = unsafe { core::ptr::read_unaligned(buf.as_ptr() as *const Self) };
    node.convert_big_endian_to_native();
    Some(node)
}
pub fn search(self, root: Index, key_to_find: BTKeyType, bd: &mut dyn BlockDevice) -> Option<(BTKey, String)> {
let mut node = self;
loop {
let mut i = 0;
let mut last_key: Option<BTKey> = None;
while i < node.key_count {
let key_offset = if i == 0 {
node.key_array
} else {
last_key.as_ref().unwrap().length as Index + last_key.as_ref().unwrap().value
};
let key = BTKey::read(key_offset, bd)?;
if key.0.key == key_to_find {
return Some(key);
} else if key.0.key > key_to_find {
break;
/// Removes an entry from the directory listing
pub fn remove_entry(&mut self, name: &str) {
let crc32 = crc32::crc32(name.as_bytes());
let index = self.btree.search(crc32);
if let Some(node) = index {
if let Some(key) = node.keys.iter_mut().find(|key| key.0 == crc32) {
key.1.entries.retain(|entry| entry.name != name);
if key.1.entries.is_empty() {
}
i += 1;
last_key = Some(key.0);
}
if node.child_count == 0 {
return None;
}
let child_offset = if i == 0 {
node.child_array
} else {
last_key.unwrap().value
};
node = BTNode::read(root, child_offset, bd)?;
}
}
/// Returns a flat list of all entries in the directory, in b-tree
/// traversal order; empty if the tree has no root.
pub fn list(&mut self) -> Vec<DirectoryListing> {
    let mut all_entries = Vec::new();
    let root = match self.btree.root {
        Some(r) => r,
        None => return all_entries,
    };
    for bucket in self.btree.traverse_in_order_values(root) {
        all_entries.extend_from_slice(&bucket.entries);
    }
    all_entries
}
/// Rebuilds the b-tree from the backup entries
///
/// Discards the current tree entirely and re-inserts every listing from
/// `backup_entries`, re-hashing each name. Listings whose names collide
/// on crc32 are merged back into a single `BTreeEntry` bucket.
pub fn rebuild(&mut self) {
    self.btree = BTree::new(BTd);
    for entry in &self.backup_entries {
        let crc32 = crc32::crc32(entry.name.as_bytes());
        // a bucket for this hash can already exist if an earlier backup
        // entry collided on crc32
        let index = self.btree.search(crc32);
        if let Some(node) = index {
            if let Some(key) = node.keys.iter_mut().find(|key| key.0 == crc32) {
                key.1.entries.push(entry.clone());
            }
        } else {
            let entries = vec![entry.clone()];
            self.btree.insert(crc32, BTreeEntry { crc32, entries });
        }
    }
}
}
impl ToBytes for Directory {
    /// Serializes as: big-endian `u32` backup-entry count, then each backup
    /// entry as a `u32` byte-size prefix followed by its bytes, then the
    /// `crc32` field, then the serialized b-tree.
    fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::new();
        let entry_count = self.backup_entries.len() as u32;
        out.extend_from_slice(&entry_count.to_be_bytes());
        for listing in &self.backup_entries {
            out.extend_from_slice(&listing.size_of().to_be_bytes());
            out.extend_from_slice(&listing.to_bytes());
        }
        out.extend_from_slice(&self.crc32.to_be_bytes());
        out.extend_from_slice(&self.btree.to_bytes());
        out
    }
}
impl FromBytes for Directory {
    /// Deserializes the layout produced by `ToBytes for Directory`:
    /// backup-entry count, size-prefixed backup entries, crc32, b-tree.
    /// NOTE(review): slice indexing and `unwrap()` here panic on truncated
    /// or corrupt input — the `FromBytes` trait offers no error channel.
    fn from_bytes(bytes: &[u8]) -> Self {
        let backup_entry_count = u32::from_be_bytes(bytes[0..4].try_into().unwrap()) as usize;
        let mut backup_entries = Vec::new();
        // running byte offset just past everything consumed so far
        let mut offset = 4;
        for _ in 0..backup_entry_count {
            let entry_size = u32::from_be_bytes(bytes[offset..offset + 4].try_into().unwrap()) as usize;
            backup_entries.push(DirectoryListing::from_bytes(&bytes[offset + 4..offset + 4 + entry_size]));
            offset += 4 + entry_size;
        }
        let crc32 = u32::from_be_bytes(bytes[offset..offset + 4].try_into().unwrap());
        // the remainder of the buffer is the b-tree image
        let btree = BTree::from_bytes(&bytes[offset + 4..]);
        Self { backup_entries, crc32, btree }
    }
}
impl ToBytes for DirectoryListing {
    /// Layout: big-endian `u32` name length, raw name bytes, big-endian inode.
    fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(4 + self.name.len() + 8);
        out.extend_from_slice(&(self.name.len() as u32).to_be_bytes());
        out.extend_from_slice(self.name.as_bytes());
        out.extend_from_slice(&self.inode.to_be_bytes());
        out
    }
}
impl FromBytes for DirectoryListing {
    /// Inverse of `ToBytes for DirectoryListing`; panics (via `unwrap`) on
    /// truncated input or a name that is not valid UTF-8.
    fn from_bytes(bytes: &[u8]) -> Self {
        let name_len = u32::from_be_bytes(bytes[0..4].try_into().unwrap()) as usize;
        let name_end = 4 + name_len;
        Self {
            name: String::from_utf8(bytes[4..name_end].to_vec()).unwrap(),
            inode: Index::from_be_bytes(bytes[name_end..name_end + 8].try_into().unwrap()),
        }
    }
}
impl SizeOf for DirectoryListing {
    /// Serialized size: 4-byte name-length prefix + name bytes + 8-byte inode.
    fn size_of(&self) -> u32 {
        self.name.len() as u32 + 12
    }
}
impl ToBytes for BTreeEntry {
    /// Layout: big-endian crc32, big-endian `u32` entry count, then each
    /// listing as a `u32` byte-size prefix followed by its bytes.
    fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::new();
        out.extend_from_slice(&self.crc32.to_be_bytes());
        out.extend_from_slice(&(self.entries.len() as u32).to_be_bytes());
        for listing in &self.entries {
            out.extend_from_slice(&listing.size_of().to_be_bytes());
            out.extend_from_slice(&listing.to_bytes());
        }
        out
    }
}
impl FromBytes for BTreeEntry {
    /// Inverse of `ToBytes for BTreeEntry`; panics on truncated input.
    fn from_bytes(bytes: &[u8]) -> Self {
        let crc32 = u32::from_be_bytes(bytes[0..4].try_into().unwrap());
        let entry_count = u32::from_be_bytes(bytes[4..8].try_into().unwrap()) as usize;
        let mut entries = Vec::new();
        // cursor walks past the 8-byte header, then each size-prefixed entry
        let mut cursor = 8;
        for _ in 0..entry_count {
            let size = u32::from_be_bytes(bytes[cursor..cursor + 4].try_into().unwrap()) as usize;
            let body = &bytes[cursor + 4..cursor + 4 + size];
            entries.push(DirectoryListing::from_bytes(body));
            cursor += 4 + size;
        }
        Self { crc32, entries }
    }
}
impl SizeOf for BTreeEntry {
    /// Serialized size: 8 header bytes (crc32 + count) plus, per listing,
    /// a 4-byte size prefix and the listing's own serialized size.
    fn size_of(&self) -> u32 {
        self.entries
            .iter()
            .fold(8u32, |total, listing| total + 4 + listing.size_of())
    }
}

View file

@ -12,6 +12,7 @@ pub mod btree;
pub mod structs;
pub mod bitmap;
pub mod crc32;
pub mod safe;
/// Reads the superblock (located at byte offset 1024 of the block device) and returns it.
/// Returns None if the block device is too small to contain a superblock.
@ -1053,8 +1054,8 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
}
let mut inode = inode.unwrap();
// target block is either an index into the direct blocks or an indirect block (if greater than 11)
if content.target_block < 12 {
let previous_block = inode.direct_block_addresses[content.target_block as usize];
if content.target_block < 32 {
let previous_block = inode.listblock.direct_block_addresses[content.target_block as usize];
// update the journal entry
journal_entry.content.block_write.real_target_block = previous_block;
@ -1062,47 +1063,65 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
return false;
}
inode.direct_block_addresses[content.target_block as usize] = content.source_block;
inode.listblock.direct_block_addresses[content.target_block as usize] = content.source_block;
// if target_block is greater than block_count, we need to update block_count and the inode's byte count
if content.target_block >= inode.block_count {
inode.block_count = content.target_block + 1;
inode.size = inode.block_count * sb.block_size as u64;
inode.listblock.count = inode.block_count;
// note: we are assuming here that we are only increasing the size by 1,
// greater size increases may result in weird corruption
// (undefined dbas will be listed as part of the file, which will either result
// in garbage data or leakage from another file)
// fixme: in the future, we should check for this case and allocate blocks if needed
}
// update the inode
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
} else {
let conversion_needed = inode.flags & InodeFlags::INDIRECT as u32 == 0;
// figure out which indirect block we need to write to (either 1, 2, or 3)
// range 12..(12*2) is indirect block 1
// range (12*2)..(12*3) is indirect block 2
// range (12*3)..(12*4) is indirect block 3
let indirect_block_index = (content.target_block - 12) / 12;
let indirect_block_offset = (content.target_block - 12) % 12;
let indirect_block = inode.direct_block_addresses[indirect_block_index as usize];
let mut indirect_block_buf = read_datablock(indirect_block, sb, bd);
// get the count
let mut count = u64::from_be_bytes(indirect_block_buf.as_slice()[0..8].try_into().unwrap());
// place the source block at index (indirect_block_offset * 8) + 8
let target_index = (indirect_block_offset * 8) + 8;
// if there's already a block at the target index, we need to update the journal entry
if indirect_block_offset < count {
// update the journal entry
journal_entry.content.block_write.real_target_block = u64::from_be_bytes(indirect_block_buf.as_slice()[target_index as usize..(target_index + 8) as usize].try_into().unwrap());
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
}
indirect_block_buf.as_mut_slice()[target_index as usize..(target_index + 8) as usize].clone_from_slice(&content.source_block.to_be_bytes());
// update the count
if count < indirect_block_offset + 1 {
count = indirect_block_offset + 1;
}
indirect_block_buf.as_mut_slice()[0..8].clone_from_slice(&count.to_be_bytes());
// write the indirect block back to the block device
if !unsafe { write_datablock(indirect_block, sb, bd, &indirect_block_buf) } {
let res = get_indirect_datablock(
sb, bd, inode.listblock, content.target_block);
if res == None {
return false;
}
let (indirect_block_addr, indirect_block_index) = res.unwrap();
let mut indirect_block = read_datablock(indirect_block_addr, sb, bd);
let mut buf = [0u8; 8];
buf.copy_from_slice(&indirect_block[(indirect_block_index * 8) as usize..(indirect_block_index * 8 + 8) as usize]);
let previous_block = u64::from_be_bytes(buf);
// update the journal entry
journal_entry.content.block_write.real_target_block = previous_block;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
// update the indirect block
buf.copy_from_slice(&content.source_block.to_be_bytes());
indirect_block[(indirect_block_index * 8) as usize..(indirect_block_index * 8 + 8) as usize].copy_from_slice(&buf);
if !unsafe { write_datablock(indirect_block_addr, sb, bd, &indirect_block) } {
return false;
}
// if target_block is greater than block_count, we need to update block_count and the inode's byte count
if content.target_block >= inode.block_count {
inode.block_count = content.target_block + 1;
inode.size = inode.block_count * sb.block_size as u64;
inode.listblock.count = inode.block_count;
// note: see note above
}
// update the inode
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
}
} else if content.target_type == JBRTargetType::Disk as u32 {
// copy the data directly to the offset on the disk
@ -1251,10 +1270,9 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_
// get dbas of new list block
let buf = read_datablock(content.list_block, sb, bd);
let list_block = unsafe { core::ptr::read(buf.as_ptr() as *const ListBlock) };
let dba = list_block.direct_block_addresses;
// update inode
let mut inode = inode;
inode.direct_block_addresses = dba;
inode.listblock = list_block;
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
@ -1277,48 +1295,275 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_
let old_list_block = read_datablock(content.old_list_block, sb, bd);
let old_list_block = unsafe { core::ptr::read(old_list_block.as_ptr() as *const ListBlock) };
for i in 0..9 {
if old_list_block.direct_block_addresses[i] != 0 {
unused_datablocks.push(old_list_block.direct_block_addresses[i]);
let mut taken = 0;
for i in 0..32 {
if old_list_block.count > i {
unused_datablocks.push(old_list_block.direct_block_addresses[i as usize]);
taken += 1;
}
}
for x in 0..3 {
if !old_list_block.using_indirect_blocks {
unused_datablocks.push(old_list_block.direct_block_addresses[x + 9]);
} else {
// read indirect block
let mut deallocation_queue: Vec<Index> = Vec::new();
let mut buf = vec![];
let mut ptr = old_list_block.direct_block_addresses[x + 9];
deallocation_queue.push(ptr);
while !deallocation_queue.is_empty() {
// read indirect block
buf = read_datablock(ptr, sb, bd);
let mut head = 0;
let count = u64::from_be_bytes(buf[head..head + 8].try_into().unwrap()) as usize;
head += 8;
for i in 0..count {
let is_data: bool = buf[head] != 0;
let mut depth_no_data = buf[head..head + 8].to_vec();
depth_no_data[0] = 0;
let depth = u64::from_be_bytes(depth_no_data.try_into().unwrap()) as usize;
head += 8;
let new_ptr = u64::from_be_bytes(buf[head..head + 8].try_into().unwrap());
if !is_data {
deallocation_queue.push(new_ptr);
} else {
unused_datablocks.push(new_ptr);
}
}
// deallocate this block
unused_datablocks.push(ptr);
// get next block
if let Some(next_ptr) = deallocation_queue.pop() {
ptr = next_ptr;
} else {
let max_per_block = sb.block_size as u64 / 8;
let N = max_per_block * 32;
let N2 = N * N;
let N3 = N2 * N;
let N4 = N3 * N;
let N5 = N4 * N;
let N6 = N5 * N;
// enclosed to make collapsable
// todo! we should refactor this code eventually so that we don't have
// multi-hundred line long things cluttering everything like this
{
// Walks a two-level indirect-block table: each entry of `list_block` points
// at a block of pointers, each of which points at a block of data-block
// addresses. Every referenced data block — and each table block itself —
// is pushed onto `unused_datablocks` for deallocation, bumping `*taken`.
// NOTE(review): the stop conditions mix two different measures —
// `*taken >= old_count` in the outer loops vs `unused_datablocks.len() >= *taken`
// in the inner ones, even though `taken` is itself incremented on each push.
// Confirm this is intended. As written the function always returns `true`.
fn pushunused1(
    sb: &Superblock, bd: &mut dyn BlockDevice, list_block: &[Index], old_count: usize,
    unused_datablocks: &mut Vec<Index>, taken: &mut usize, max1: usize, max2: usize,
    max_per_block: u64) -> bool {
    for block2 in 0..max2 {
        if *taken >= old_count {
            break;
        }
        let buf = read_datablock(list_block[block2], sb, bd);
        let mut j = 0;
        // collect the non-zero level-1 pointers out of this table block
        let mut list = vec![0u64; max_per_block as usize];
        while j < max_per_block {
            if *taken + j as usize >= old_count {
                break;
            }
            let mut buf2 = [0u8; 8];
            buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
            let dba = u64::from_be_bytes(buf2);
            if dba != 0 {
                list[j as usize] = dba;
            }
            j += 1;
        }
        // follow each level-1 pointer and queue its data-block addresses
        for block1 in 0..max1 {
            if unused_datablocks.len() >= *taken {
                break;
            }
            let buf = read_datablock(list[block1], sb, bd);
            let mut j = 0;
            while j < max_per_block {
                if unused_datablocks.len() >= *taken {
                    break;
                }
                let mut buf2 = [0u8; 8];
                buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
                let dba = u64::from_be_bytes(buf2);
                if dba != 0 {
                    unused_datablocks.push(dba);
                    *taken += 1;
                }
                j += 1;
            }
        }
        // the level-2 table block itself is also freed
        unused_datablocks.push(list_block[block2]);
        *taken += 1;
    }
    true
}
// double indirect blocks
if !pushunused1(
sb, bd, &old_list_block.double_indirect_block_address, old_list_block.count as usize,
&mut unused_datablocks, &mut taken, max_per_block as usize, 32, max_per_block) {
return false;
}
// triple indirect blocks
// Walks a three-level indirect-block table: peels one level of indirection
// from each entry of `list_block`, then delegates the resulting two-level
// table to `pushunused1`, queueing every block (tables included) for
// deallocation. Propagates `false` if `pushunused1` reports failure.
fn pushunused2(
    sb: &Superblock, bd: &mut dyn BlockDevice, list_block: &[Index], old_count: usize,
    unused_datablocks: &mut Vec<Index>, taken: &mut usize, max1: usize, max2: usize,
    max3: usize, max_per_block: u64) -> bool {
    for block3 in 0..max3 {
        if *taken >= old_count {
            break;
        }
        let buf = read_datablock(list_block[block3], sb, bd);
        let mut j = 0;
        // collect the non-zero level-2 pointers out of this table block
        let mut list = vec![0u64; max_per_block as usize];
        while j < max_per_block {
            if *taken + j as usize >= old_count {
                break;
            }
            let mut buf2 = [0u8; 8];
            buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
            let dba = u64::from_be_bytes(buf2);
            if dba != 0 {
                list[j as usize] = dba;
            }
            j += 1;
        }
        if !pushunused1(
            sb, bd, &list, old_count,
            unused_datablocks, taken, max1, max2, max_per_block) {
            return false;
        }
        // the level-3 table block itself is also freed
        unused_datablocks.push(list_block[block3]);
        *taken += 1;
    }
    true
}
if !pushunused2(
sb, bd, &old_list_block.triple_indirect_block_address, old_list_block.count as usize,
&mut unused_datablocks, &mut taken, max_per_block as usize, max_per_block as usize, 32, max_per_block) {
return false;
}
// quadruple indirect blocks
for block4 in 0..32 {
if taken >= old_list_block.count as usize {
break;
}
let buf = read_datablock(old_list_block.quadruple_indirect_block_address[block4 as usize], sb, bd);
let mut j = 0;
let mut list = vec![0u64; max_per_block as usize];
while j < max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let mut buf2 = [0u8; 8];
buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
let dba = u64::from_be_bytes(buf2);
if dba != 0 {
list[j as usize] = dba;
}
j += 1;
}
if !pushunused2(
sb, bd, &list, old_list_block.count as usize,
&mut unused_datablocks, &mut taken, max_per_block as usize, max_per_block as usize, max_per_block as usize, max_per_block) {
return false;
}
unused_datablocks.push(old_list_block.quadruple_indirect_block_address[block4 as usize]);
taken += 1;
}
// quintuple indirect blocks
for block5 in 0..32 {
if taken >= old_list_block.count as usize {
break;
}
let buf = read_datablock(old_list_block.quintuple_indirect_block_address[block5 as usize], sb, bd);
let mut j = 0;
let mut list = vec![0u64; max_per_block as usize];
while j < max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let mut buf2 = [0u8; 8];
buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
let dba = u64::from_be_bytes(buf2);
if dba != 0 {
list[j as usize] = dba;
}
j += 1;
}
for block4 in 0..max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let buf = read_datablock(list[block4 as usize], sb, bd);
let mut j = 0;
let mut list = vec![0u64; max_per_block as usize];
while j < max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let mut buf2 = [0u8; 8];
buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
let dba = u64::from_be_bytes(buf2);
if dba != 0 {
list[j as usize] = dba;
}
j += 1;
}
if !pushunused2(
sb, bd, &list, old_list_block.count as usize,
&mut unused_datablocks, &mut taken, max_per_block as usize, max_per_block as usize, max_per_block as usize, max_per_block) {
return false;
}
unused_datablocks.push(list[block4 as usize]);
taken += 1;
}
unused_datablocks.push(old_list_block.quintuple_indirect_block_address[block5 as usize]);
taken += 1;
}
// sextuple indirect blocks
for block6 in 0..32 {
if taken >= old_list_block.count as usize {
break;
}
let buf = read_datablock(old_list_block.sextuple_indirect_block_address[block6 as usize], sb, bd);
let mut j = 0;
let mut list = vec![0u64; max_per_block as usize];
while j < max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let mut buf2 = [0u8; 8];
buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
let dba = u64::from_be_bytes(buf2);
if dba != 0 {
list[j as usize] = dba;
}
j += 1;
}
for block5 in 0..max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let buf = read_datablock(list[block5 as usize], sb, bd);
let mut j = 0;
let mut list = vec![0u64; max_per_block as usize];
while j < max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let mut buf2 = [0u8; 8];
buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
let dba = u64::from_be_bytes(buf2);
if dba != 0 {
list[j as usize] = dba;
}
j += 1;
}
for block4 in 0..max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let buf = read_datablock(list[block4 as usize], sb, bd);
let mut j = 0;
let mut list = vec![0u64; max_per_block as usize];
while j < max_per_block {
if taken >= old_list_block.count as usize {
break;
}
let mut buf2 = [0u8; 8];
buf2.copy_from_slice(&buf[j as usize * 8..(j * 8 + 8) as usize]);
let dba = u64::from_be_bytes(buf2);
if dba != 0 {
list[j as usize] = dba;
}
j += 1;
}
if !pushunused2(
sb, bd, &list, old_list_block.count as usize,
&mut unused_datablocks, &mut taken, max_per_block as usize, max_per_block as usize, max_per_block as usize, max_per_block) {
return false;
}
unused_datablocks.push(list[block4 as usize]);
taken += 1;
}
unused_datablocks.push(list[block5 as usize]);
taken += 1;
}
unused_datablocks.push(old_list_block.sextuple_indirect_block_address[block6 as usize]);
taken += 1;
}
}

0
src/safe.rs Normal file
View file