[space_map (rust)] Do not use an extra block set for building metadata sm
The blocks storing the metadata itself sit contiguously within a reserved range, so there is no need to use a block set to represent them.
parent 7ab97a9aae
commit c71132c056
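The core idea can be shown with a standalone sketch (the `Batcher` type and block numbers below are illustrative only; the real WriteBatcher calls find_free() against the space map rather than handing out every block): because the batcher only ever grows the end of its reserved range, the blocks it allocates between two snapshots of that range are exactly the half-open interval r1.end..r2.end, so an explicit BTreeSet records nothing the range does not already say.

use std::collections::BTreeSet;
use std::ops::Range;

struct Batcher {
    reserved: Range<u64>, // grows only at `end`; nothing inside is ever freed
}

impl Batcher {
    fn alloc(&mut self) -> u64 {
        let b = self.reserved.end; // stand-in for find_free() in the real code
        self.reserved.end = b + 1;
        b
    }
}

fn main() {
    let mut w = Batcher { reserved: 100..110 };
    let r1 = w.reserved.clone(); // snapshot before writing the space map
    let explicit: BTreeSet<u64> = (0..4).map(|_| w.alloc()).collect();
    let r2 = w.reserved.clone(); // snapshot after
    // The set a BTreeSet would have recorded equals the range delta.
    assert!(explicit.iter().copied().eq(r1.end..r2.end));
    println!("{:?} == {:?}", explicit, r1.end..r2.end);
}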
@@ -1,7 +1,6 @@
-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use byteorder::{LittleEndian, WriteBytesExt};
 use nom::{number::complete::*, IResult};
-use std::collections::BTreeMap;
 use std::io::Cursor;
 
 use crate::checksum;
@@ -66,23 +65,28 @@ fn block_to_bitmap(b: u64) -> usize {
     (b / ENTRIES_PER_BITMAP as u64) as usize
 }
 
-fn adjust_counts(w: &mut WriteBatcher, ie: &IndexEntry, allocs: &[u64]) -> Result<IndexEntry> {
+fn adjust_counts(
+    w: &mut WriteBatcher,
+    ie: &IndexEntry,
+    begin: u64,
+    end: u64,
+) -> Result<IndexEntry> {
     use BitmapEntry::*;
 
     let mut first_free = ie.none_free_before;
-    let nr_free = ie.nr_free - allocs.len() as u32;
+    let nr_free = ie.nr_free - (end - begin) as u32;
 
     // Read the bitmap
     let bitmap_block = w.engine.read(ie.blocknr)?;
     let (_, mut bitmap) = Bitmap::unpack(bitmap_block.get_data())?;
 
     // Update all the entries
-    for a in allocs {
-        if first_free == *a as u32 {
-            first_free = *a as u32 + 1;
+    for a in begin..end {
+        if first_free == a as u32 {
+            first_free = a as u32 + 1;
         }
 
-        bitmap.entries[*a as usize] = Small(1);
+        bitmap.entries[a as usize] = Small(1);
     }
 
     // Write the bitmap
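The none_free_before bookkeeping in the rewritten loop is easy to model in isolation. A minimal sketch (the function name and test values below are made up for illustration): marking entries begin..end as allocated only advances the first-free hint when the run starts exactly at it; an earlier gap keeps the hint where it was.

fn advance_first_free(mut first_free: u32, begin: u64, end: u64) -> u32 {
    for a in begin..end {
        if first_free == a as u32 {
            first_free = a as u32 + 1;
        }
    }
    first_free
}

fn main() {
    assert_eq!(advance_first_free(10, 10, 14), 14); // run starts at first_free
    assert_eq!(advance_first_free(10, 12, 14), 10); // entries 10..12 stay free
    println!("ok");
}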
@@ -99,25 +103,32 @@ fn adjust_counts(w: &mut WriteBatcher, ie: &IndexEntry, allocs: &[u64]) -> Result<IndexEntry> {
 }
 
 pub fn write_metadata_sm(w: &mut WriteBatcher) -> Result<SMRoot> {
-    w.clear_allocations();
+    let r1 = w.get_reserved_range();
 
     let (mut indexes, ref_count_root) = write_metadata_common(w)?;
 
     let bitmap_root = w.alloc_zeroed()?;
 
     // Now we need to patch up the counts for the metadata that was used for storing
     // the space map itself. These ref counts all went from 0 to 1.
-    let allocations = w.clear_allocations();
-
-    // Sort the allocations by bitmap
-    let mut by_bitmap = BTreeMap::new();
-    for b in allocations {
-        let bitmap = block_to_bitmap(b);
-        (*by_bitmap.entry(bitmap).or_insert_with(Vec::new)).push(b % ENTRIES_PER_BITMAP as u64);
+    let r2 = w.get_reserved_range();
+    if r2.end < r1.end {
+        return Err(anyhow!("unsupported allocation pattern"));
     }
-
-    for (bitmap, allocs) in by_bitmap {
-        indexes[bitmap] = adjust_counts(w, &indexes[bitmap], &allocs)?;
+    let bi_begin = block_to_bitmap(r1.end);
+    let bi_end = block_to_bitmap(r2.end) + 1;
+    for (bm, ie) in indexes.iter_mut().enumerate().take(bi_end).skip(bi_begin) {
+        let begin = if bm == bi_begin {
+            r1.end % ENTRIES_PER_BITMAP as u64
+        } else {
+            0
+        };
+        let end = if bm == bi_end - 1 {
+            r2.end % ENTRIES_PER_BITMAP as u64
+        } else {
+            ENTRIES_PER_BITMAP as u64
+        };
+        *ie = adjust_counts(w, ie, begin, end)?
     }
 
     // Write out the metadata index
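The range-splitting arithmetic in the new write_metadata_sm loop can be checked on its own. A hedged sketch (the real ENTRIES_PER_BITMAP is derived from the on-disk bitmap layout; 16288 below is only an assumed value, and split() is a name invented here): it cuts the reserved-range delta r1.end..r2.end into one (begin, end) entry sub-range per bitmap, which is exactly what each adjust_counts call receives.

const ENTRIES_PER_BITMAP: u64 = 16288; // assumed value for illustration

fn block_to_bitmap(b: u64) -> usize {
    (b / ENTRIES_PER_BITMAP) as usize
}

fn split(r1_end: u64, r2_end: u64) -> Vec<(usize, u64, u64)> {
    let bi_begin = block_to_bitmap(r1_end);
    let bi_end = block_to_bitmap(r2_end) + 1;
    (bi_begin..bi_end)
        .map(|bm| {
            // First bitmap starts mid-way; the rest start at entry 0.
            let begin = if bm == bi_begin { r1_end % ENTRIES_PER_BITMAP } else { 0 };
            // Last bitmap stops mid-way; the rest are taken in full.
            let end = if bm + 1 == bi_end { r2_end % ENTRIES_PER_BITMAP } else { ENTRIES_PER_BITMAP };
            (bm, begin, end)
        })
        .collect()
}

fn main() {
    // A delta crossing one bitmap boundary: entries 16000..16288 of bitmap 0,
    // then entries 0..712 of bitmap 1.
    for (bm, begin, end) in split(16000, 17000) {
        println!("bitmap {}: entries {}..{}", bm, begin, end);
    }
}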
@@ -1,5 +1,4 @@
 use anyhow::{anyhow, Result};
-use std::collections::BTreeSet;
 use std::ops::DerefMut;
 use std::sync::{Arc, Mutex};
 
@@ -19,9 +18,6 @@ pub struct WriteBatcher {
     batch_size: usize,
     queue: Vec<Block>,
 
-    // The actual blocks allocated or reserved by this WriteBatcher
-    allocations: BTreeSet<u64>,
-
     // The reserved range covers all the blocks allocated or reserved by this
     // WriteBatcher, and the blocks already occupied. No blocks in this range
     // are expected to be freed, hence a single range is used for the representation.
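That comment states the invariant the whole patch rests on. A small model of it (this find_free is a stand-in: the real one takes a SpaceMap trait object and its exact search policy lives in the source): as long as every block handed out lies at or beyond reserved.end, pushing end forward keeps the single range covering everything in use, with no set needed.

use std::ops::Range;

// Assumed behaviour: return the first free block not below `reserved.end`.
fn find_free(free: &[bool], reserved: &Range<u64>) -> Option<u64> {
    (reserved.end..free.len() as u64).find(|&b| free[b as usize])
}

fn main() {
    let mut free = vec![true; 16];
    let mut reserved: Range<u64> = 0..4; // blocks 0..4 were already occupied
    for _ in 0..4 {
        let b = find_free(&free, &reserved).unwrap();
        reserved.end = b + 1; // grow the range; it is never shrunk
        free[b as usize] = false;
    }
    // Four allocations later the range still covers every block in use.
    assert_eq!(reserved, 0..8);
    println!("reserved = {:?}", reserved);
}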
@@ -60,7 +56,6 @@ impl WriteBatcher {
             sm,
             batch_size,
             queue: Vec::with_capacity(batch_size),
-            allocations: BTreeSet::new(),
             reserved: std::ops::Range {
                 start: alloc_begin,
                 end: alloc_begin,
@@ -72,7 +67,6 @@ impl WriteBatcher {
         let mut sm = self.sm.lock().unwrap();
         let b = find_free(sm.deref_mut(), &self.reserved)?;
         self.reserved.end = b + 1;
-        self.allocations.insert(b);
 
         sm.set(b, 1)?;
 
@@ -83,7 +77,6 @@ impl WriteBatcher {
         let mut sm = self.sm.lock().unwrap();
         let b = find_free(sm.deref_mut(), &self.reserved)?;
         self.reserved.end = b + 1;
-        self.allocations.insert(b);
 
         sm.set(b, 1)?;
 
@@ -94,7 +87,6 @@ impl WriteBatcher {
         let mut sm = self.sm.lock().unwrap();
         let b = find_free(sm.deref_mut(), &self.reserved)?;
         self.reserved.end = b + 1;
-        self.allocations.insert(b);
 
         Ok(Block::new(b))
     }
@@ -103,17 +95,10 @@ impl WriteBatcher {
         let mut sm = self.sm.lock().unwrap();
         let b = find_free(sm.deref_mut(), &self.reserved)?;
         self.reserved.end = b + 1;
-        self.allocations.insert(b);
 
         Ok(Block::zeroed(b))
     }
 
-    pub fn clear_allocations(&mut self) -> BTreeSet<u64> {
-        let mut tmp = BTreeSet::new();
-        std::mem::swap(&mut tmp, &mut self.allocations);
-        tmp
-    }
-
     pub fn get_reserved_range(&self) -> std::ops::Range<u64> {
         std::ops::Range {
             start: self.reserved.start,