Merge pull request #177 from mingnus/2021-06-03-cache-restore-fixes
Fix restoration tools
Commit 8e609458c2
@@ -41,3 +41,6 @@ quickcheck_macros = "0.9"

[profile.release]
debug = true

[features]
rust_tests = []
@@ -10,24 +10,38 @@ use thinp::cache::dump::{dump, CacheDumpOptions};

fn main() {
    let parser = App::new("cache_dump")
        .version(thinp::version::tools_version())
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to check")
                .required(true)
                .index(1),
        )
        .about("Dump the cache metadata to stdout in XML format")
        .arg(
            Arg::with_name("REPAIR")
                .help("")
                .long("repair")
                .value_name("REPAIR"),
                .help("Repair the metadata whilst dumping it")
                .short("r")
                .long("repair"),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output file rather than stdout")
                .short("o")
                .long("output")
                .value_name("OUTPUT"),
        )
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to dump")
                .required(true)
                .index(1),
        );

    let matches = parser.get_matches();
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = if matches.is_present("OUTPUT") {
        Some(Path::new(matches.value_of("OUTPUT").unwrap()))
    } else {
        None
    };

    let opts = CacheDumpOptions {
        dev: &input_file,
        input: input_file,
        output: output_file,
        async_io: false,
        repair: matches.is_present("REPAIR"),
    };
@@ -25,14 +25,12 @@ fn main() {
        .arg(
            Arg::with_name("SB_ONLY")
                .help("Only check the superblock.")
                .long("super-block-only")
                .value_name("SB_ONLY"),
                .long("super-block-only"),
        )
        .arg(
            Arg::with_name("SKIP_MAPPINGS")
                .help("Don't check the mapping tree")
                .long("skip-mappings")
                .value_name("SKIP_MAPPINGS"),
                .long("skip-mappings"),
        )
        .arg(
            Arg::with_name("AUTO_REPAIR")

@@ -47,7 +45,7 @@ fn main() {
        .arg(
            Arg::with_name("CLEAR_NEEDS_CHECK")
                .help("Clears the 'needs_check' flag in the superblock")
                .long("clear-needs-check"),
                .long("clear-needs-check-flag"),
        )
        .arg(
            Arg::with_name("OVERRIDE_MAPPING_ROOT")
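The removed `.value_name(...)` calls reflect a clap 2.x distinction worth noting: a flag that takes no value only needs its long/short name, while an option that accepts a value uses `takes_value()`/`value_name()`. A minimal sketch of the pattern the change moves to (argument names here are illustrative, not taken from the tools):

```rust
use clap::Arg;

// Sketch, assuming clap 2.x: a boolean flag vs. an option taking a value.
fn example_args() -> (Arg<'static, 'static>, Arg<'static, 'static>) {
    let flag = Arg::with_name("SB_ONLY")
        .help("Only check the superblock.")
        .long("super-block-only"); // no takes_value()/value_name() for a flag

    let opt = Arg::with_name("OVERRIDE_MAPPING_ROOT")
        .long("override-mapping-root")
        .value_name("BLOCKNR")
        .takes_value(true);

    (flag, opt)
}
```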
@@ -12,9 +12,9 @@ use thinp::report::*;
use thinp::thin::dump::{dump, ThinDumpOptions};

fn main() {
    let parser = App::new("thin_check")
    let parser = App::new("thin_dump")
        .version(thinp::version::tools_version())
        .about("Validates thin provisioning metadata on a device or file.")
        .about("Dump thin-provisioning metadata to stdout in XML format")
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")

@@ -22,60 +22,49 @@ fn main() {
                .long("quiet"),
        )
        .arg(
            Arg::with_name("SB_ONLY")
                .help("Only check the superblock.")
                .long("super-block-only")
                .value_name("SB_ONLY"),
            Arg::with_name("REPAIR")
                .help("Repair the metadata whilst dumping it")
                .short("r")
                .long("repair"),
        )
        .arg(
            Arg::with_name("SKIP_MAPPINGS")
                .help("Don't check the mapping tree")
                .long("skip-mappings")
                .value_name("SKIP_MAPPINGS"),
        )
        .arg(
            Arg::with_name("AUTO_REPAIR")
                .help("Auto repair trivial issues.")
                .long("auto-repair"),
        )
        .arg(
            Arg::with_name("IGNORE_NON_FATAL")
                .help("Only return a non-zero exit code if a fatal error is found.")
                .long("ignore-non-fatal-errors"),
        )
        .arg(
            Arg::with_name("CLEAR_NEEDS_CHECK")
                .help("Clears the 'needs_check' flag in the superblock")
                .long("clear-needs-check"),
        )
        .arg(
            Arg::with_name("OVERRIDE_MAPPING_ROOT")
                .help("Specify a mapping root to use")
                .long("override-mapping-root")
                .value_name("OVERRIDE_MAPPING_ROOT")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("METADATA_SNAPSHOT")
                .help("Check the metadata snapshot on a live pool")
                .short("m")
                .long("metadata-snapshot")
                .value_name("METADATA_SNAPSHOT"),
        )
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to check")
                .required(true)
                .index(1),
                .help("Do not dump the mappings")
                .long("skip-mappings"),
        )
        .arg(
            Arg::with_name("SYNC_IO")
                .help("Force use of synchronous io")
                .long("sync-io"),
        )
        .arg(
            Arg::with_name("METADATA_SNAPSHOT")
                .help("Access the metadata snapshot on a live pool")
                .short("m")
                .long("metadata-snapshot")
                .value_name("METADATA_SNAPSHOT"),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output file rather than stdout")
                .short("o")
                .long("output")
                .value_name("OUTPUT"),
        )
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to dump")
                .required(true)
                .index(1),
        );

    let matches = parser.get_matches();
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = if matches.is_present("OUTPUT") {
        Some(Path::new(matches.value_of("OUTPUT").unwrap()))
    } else {
        None
    };

    if !file_utils::file_exists(input_file) {
        eprintln!("Couldn't find input file '{:?}'.", &input_file);

@@ -93,7 +82,8 @@ fn main() {
    }

    let opts = ThinDumpOptions {
        dev: &input_file,
        input: input_file,
        output: output_file,
        async_io: !matches.is_present("SYNC_IO"),
        report,
    };
src/cache/dump.rs (29 lines changed, vendored)
@@ -1,5 +1,8 @@
use anyhow::anyhow;
use fixedbitset::FixedBitSet;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::path::Path;
use std::sync::{Arc, Mutex};

@@ -191,7 +194,8 @@ impl<'a> ArrayVisitor<Hint> for HintEmitter<'a> {
//------------------------------------------

pub struct CacheDumpOptions<'a> {
    pub dev: &'a Path,
    pub input: &'a Path,
    pub output: Option<&'a Path>,
    pub async_io: bool,
    pub repair: bool,
}

@@ -204,19 +208,24 @@ fn mk_context(opts: &CacheDumpOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync>;

    if opts.async_io {
        engine = Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, false)?);
        engine = Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?);
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads, false)?);
        engine = Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?);
    }

    Ok(Context { engine })
}

fn dump_metadata(ctx: &Context, sb: &Superblock, _repair: bool) -> anyhow::Result<()> {
fn dump_metadata(
    ctx: &Context,
    w: &mut dyn Write,
    sb: &Superblock,
    _repair: bool,
) -> anyhow::Result<()> {
    let engine = &ctx.engine;

    let mut out = xml::XmlWriter::new(std::io::stdout());
    let mut out = xml::XmlWriter::new(w);
    let xml_sb = xml::Superblock {
        uuid: "".to_string(),
        block_size: sb.data_block_size,

@@ -272,6 +281,7 @@ fn dump_metadata(ctx: &Context, sb: &Superblock, _repair: bool) -> anyhow::Resul
    out.hints_e()?;

    out.superblock_e()?;
    out.eof()?;

    Ok(())
}

@@ -281,7 +291,14 @@ pub fn dump(opts: CacheDumpOptions) -> anyhow::Result<()> {
    let engine = &ctx.engine;
    let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;

    dump_metadata(&ctx, &sb, opts.repair)
    let mut writer: Box<dyn Write>;
    if opts.output.is_some() {
        writer = Box::new(BufWriter::new(File::create(opts.output.unwrap())?));
    } else {
        writer = Box::new(BufWriter::new(std::io::stdout()));
    }

    dump_metadata(&ctx, &mut writer, &sb, opts.repair)
}

//------------------------------------------
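Both dump tools now route their XML output through a `Box<dyn Write>` chosen from the optional `-o`/`--output` argument. A minimal standalone sketch of that pattern, with a hypothetical helper name:

```rust
use std::fs::File;
use std::io::{self, BufWriter, Write};
use std::path::Path;

// Pick stdout or a file as the sink; both are used through `dyn Write`.
// `output` mirrors the optional --output argument (assumption for this sketch).
fn open_writer(output: Option<&Path>) -> io::Result<Box<dyn Write>> {
    Ok(match output {
        Some(path) => Box::new(BufWriter::new(File::create(path)?)),
        None => Box::new(BufWriter::new(io::stdout())),
    })
}
```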
src/cache/restore.rs (4 lines changed, vendored)
@@ -3,7 +3,6 @@ use anyhow::{anyhow, Result};
use std::convert::TryInto;
use std::fs::OpenOptions;
use std::io::Cursor;
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;

@@ -230,8 +229,7 @@ impl<'a> MetadataVisitor for Restorer<'a> {
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
    let mut sm_root = vec![0u8; SPACE_MAP_ROOT_SIZE];
    let mut cur = Cursor::new(&mut sm_root);
    let sm_without_meta = clone_space_map(w.sm.lock().unwrap().deref())?;
    let r = write_metadata_sm(w, sm_without_meta.deref())?;
    let r = write_metadata_sm(w)?;
    r.pack(&mut cur)?;

    Ok(sm_root)
@@ -1,10 +1,10 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use byteorder::WriteBytesExt;
use std::collections::VecDeque;
use std::io::Cursor;

use crate::checksum;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::array::*;
use crate::pdata::btree_builder::*;
use crate::pdata::unpack::*;

@@ -14,12 +14,10 @@ use crate::write_batcher::*;

pub struct ArrayBlockBuilder<V: Unpack + Pack> {
    array_io: ArrayIO<V>,
    max_entries_per_block: usize,
    values: VecDeque<(u64, V)>,
    array_blocks: Vec<u64>,
    nr_entries: u64,
    nr_emitted: u64,
    nr_queued: u64,
    nr_entries: u64, // size of the array
    entries_per_block: usize,
    array_blocks: Vec<u64>, // emitted array blocks
    values: Vec<V>, // internal buffer
}

pub struct ArrayBuilder<V: Unpack + Pack> {

@@ -44,91 +42,68 @@ fn calc_max_entries<V: Unpack>() -> usize {

impl<V: Unpack + Pack + Clone + Default> ArrayBlockBuilder<V> {
    pub fn new(nr_entries: u64) -> ArrayBlockBuilder<V> {
        let entries_per_block = calc_max_entries::<V>();
        let nr_blocks = div_up(nr_entries, entries_per_block as u64) as usize;
        let next_cap = std::cmp::min(nr_entries, entries_per_block as u64) as usize;

        ArrayBlockBuilder {
            array_io: ArrayIO::new(),
            max_entries_per_block: calc_max_entries::<V>(),
            values: VecDeque::new(),
            array_blocks: Vec::new(),
            nr_entries,
            nr_emitted: 0,
            nr_queued: 0,
            entries_per_block,
            array_blocks: Vec::with_capacity(nr_blocks),
            values: Vec::<V>::with_capacity(next_cap),
        }
    }

    pub fn push_value(&mut self, w: &mut WriteBatcher, index: u64, v: V) -> Result<()> {
        assert!(index >= self.nr_emitted + self.nr_queued);
        assert!(index < self.nr_entries);
        let bi = index / self.entries_per_block as u64;
        let i = (index % self.entries_per_block as u64) as usize;

        self.values.push_back((index, v));
        self.nr_queued = index - self.nr_emitted + 1;

        if self.nr_queued > self.max_entries_per_block as u64 {
            self.emit_blocks(w)?;
        if bi < self.array_blocks.len() as u64 || i < self.values.len() || index >= self.nr_entries
        {
            return Err(anyhow!("array index out of bounds"));
        }

        while (self.array_blocks.len() as u64) < bi {
            self.emit_block(w)?;
        }

        if i > self.values.len() + 1 {
            self.values.resize_with(i - 1, Default::default);
        }
        self.values.push(v);

        Ok(())
    }

    pub fn complete(mut self, w: &mut WriteBatcher) -> Result<Vec<u64>> {
        if self.nr_emitted + self.nr_queued < self.nr_entries {
            // FIXME: flushing with a default values looks confusing
            self.push_value(w, self.nr_entries - 1, Default::default())?;
        // Emit all the remaining queued values
        let nr_blocks = self.array_blocks.capacity();
        while self.array_blocks.len() < nr_blocks {
            self.emit_block(w)?;
        }
        self.emit_all(w)?;

        Ok(self.array_blocks)
    }

    /// Emit all the remaining queued values
    fn emit_all(&mut self, w: &mut WriteBatcher) -> Result<()> {
        match self.nr_queued {
            0 => {
                // There's nothing to emit
                Ok(())
            }
            n if n <= self.max_entries_per_block as u64 => self.emit_values(w),
            _ => {
                panic!(
                    "There shouldn't be more than {} queued values",
                    self.max_entries_per_block
                );
            }
        }
    }

    /// Emit one or more fully utilized array blocks
    fn emit_blocks(&mut self, w: &mut WriteBatcher) -> Result<()> {
        while self.nr_queued > self.max_entries_per_block as u64 {
            self.emit_values(w)?;
        }
        Ok(())
    }

    /// Emit an array block with the queued values
    fn emit_values(&mut self, w: &mut WriteBatcher) -> Result<()> {
        let mut values = Vec::<V>::with_capacity(self.max_entries_per_block);
        let mut nr_free = self.max_entries_per_block;

        while !self.values.is_empty() && nr_free > 0 {
            let len = self.values.front().unwrap().0 - self.nr_emitted + 1;
            if len <= nr_free as u64 {
                let (_, v) = self.values.pop_front().unwrap();
                if len > 1 {
                    values.resize_with(values.len() + len as usize - 1, Default::default);
                }
                values.push(v);
                nr_free -= len as usize;
                self.nr_emitted += len;
                self.nr_queued -= len;
            } else {
                values.resize_with(values.len() + nr_free as usize, Default::default);
                self.nr_emitted += nr_free as u64;
                self.nr_queued -= nr_free as u64;
                nr_free = 0;
            }
    /// Emit a fully utilized array block
    fn emit_block(&mut self, w: &mut WriteBatcher) -> Result<()> {
        let nr_blocks = self.array_blocks.capacity();
        let cur_bi = self.array_blocks.len();
        let next_cap;
        if cur_bi < nr_blocks - 1 {
            let next_begin = (cur_bi as u64 + 1) * self.entries_per_block as u64;
            next_cap =
                std::cmp::min(self.nr_entries - next_begin, self.entries_per_block as u64) as usize;
        } else {
            next_cap = 0;
        }

        let mut values = Vec::<V>::with_capacity(next_cap);
        std::mem::swap(&mut self.values, &mut values);

        values.resize_with(values.capacity(), Default::default);
        let wresult = self.array_io.write(w, values)?;

        self.array_blocks.push(wresult.loc);

        Ok(())

@@ -150,7 +125,7 @@ impl<V: Unpack + Pack + Clone + Default> ArrayBuilder<V> {

    pub fn complete(self, w: &mut WriteBatcher) -> Result<u64> {
        let blocks = self.block_builder.complete(w)?;
        let mut index_builder = Builder::<u64>::new(Box::new(NoopRC {}));
        let mut index_builder = BTreeBuilder::<u64>::new(Box::new(NoopRC {}));

        for (i, b) in blocks.iter().enumerate() {
            index_builder.push_value(w, i as u64, *b)?;
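The rewritten `ArrayBlockBuilder::push_value` derives the target array block and the offset within it directly from the logical index. A toy model of that arithmetic (the entries-per-block constant below is illustrative only, not the crate's real value):

```rust
// Sketch of the index arithmetic used by the new push_value path.
const ENTRIES_PER_BLOCK: u64 = 126; // illustrative constant

fn locate(index: u64) -> (u64, usize) {
    let bi = index / ENTRIES_PER_BLOCK;           // which array block
    let i = (index % ENTRIES_PER_BLOCK) as usize; // offset inside that block
    (bi, i)
}

fn main() {
    assert_eq!(locate(0), (0, 0));
    assert_eq!(locate(126), (1, 0));
    assert_eq!(locate(300), (2, 48));
}
```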
@@ -36,10 +36,16 @@ impl<Value> RefCounter<Value> for NoopRC {
}

/// Wraps a space map up to become a RefCounter.
struct SMRefCounter {
pub struct SMRefCounter {
    sm: Arc<Mutex<dyn SpaceMap>>,
}

impl SMRefCounter {
    pub fn new(sm: Arc<Mutex<dyn SpaceMap>>) -> SMRefCounter {
        SMRefCounter { sm }
    }
}

impl RefCounter<u64> for SMRefCounter {
    fn get(&self, v: &u64) -> Result<u32> {
        self.sm.lock().unwrap().get(*v)

@@ -135,12 +141,16 @@ pub struct WriteResult {
    loc: u64,
}

/// Write a node to a free metadata block.
fn write_node_<V: Unpack + Pack>(w: &mut WriteBatcher, mut node: Node<V>) -> Result<WriteResult> {
/// Write a node to a free metadata block, and mark the block as reserved,
/// without increasing its reference count.
fn write_reserved_node_<V: Unpack + Pack>(
    w: &mut WriteBatcher,
    mut node: Node<V>,
) -> Result<WriteResult> {
    let keys = node.get_keys();
    let first_key = *keys.first().unwrap_or(&0u64);

    let b = w.alloc()?;
    let b = w.reserve()?;
    node.set_block(b.loc);

    let mut cursor = Cursor::new(b.get_data());

@@ -177,7 +187,7 @@ impl<V: Unpack + Pack> NodeIO<V> for LeafIO {
            values,
        };

        write_node_(w, node)
        write_reserved_node_(w, node)
    }

    fn read(&self, w: &mut WriteBatcher, block: u64) -> Result<(Vec<u64>, Vec<V>)> {

@@ -210,7 +220,7 @@ impl NodeIO<u64> for InternalIO {
            values,
        };

        write_node_(w, node)
        write_reserved_node_(w, node)
    }

    fn read(&self, w: &mut WriteBatcher, block: u64) -> Result<(Vec<u64>, Vec<u64>)> {

@@ -314,7 +324,6 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
            // Add the remaining nodes.
            for i in 1..nodes.len() {
                let n = nodes.get(i).unwrap();
                w.sm.lock().unwrap().inc(n.block, 1)?;
                self.nodes.push(n.clone());
            }
        } else {

@@ -323,7 +332,6 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {

            // add the nodes
            for n in nodes {
                w.sm.lock().unwrap().inc(n.block, 1)?;
                self.nodes.push(n.clone());
            }
        }

@@ -425,7 +433,6 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
    fn unshift_node(&mut self, w: &mut WriteBatcher) -> Result<()> {
        let ls = self.nodes.pop().unwrap();
        let (keys, values) = self.read_node(w, ls.block)?;
        w.sm.lock().unwrap().dec(ls.block)?;

        let mut vals = VecDeque::new();

@@ -446,13 +453,13 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {

//------------------------------------------

pub struct Builder<V: Unpack + Pack> {
pub struct BTreeBuilder<V: Unpack + Pack> {
    leaf_builder: NodeBuilder<V>,
}

impl<V: Unpack + Pack + Clone> Builder<V> {
    pub fn new(value_rc: Box<dyn RefCounter<V>>) -> Builder<V> {
        Builder {
impl<V: Unpack + Pack + Clone> BTreeBuilder<V> {
    pub fn new(value_rc: Box<dyn RefCounter<V>>) -> BTreeBuilder<V> {
        BTreeBuilder {
            leaf_builder: NodeBuilder::new(Box::new(LeafIO {}), value_rc),
        }
    }

@@ -466,26 +473,40 @@ impl<V: Unpack + Pack + Clone> Builder<V> {
    }

    pub fn complete(self, w: &mut WriteBatcher) -> Result<u64> {
        let mut nodes = self.leaf_builder.complete(w)?;

        // Now we iterate, adding layers of internal nodes until we end
        // up with a single root.
        while nodes.len() > 1 {
            let mut builder = NodeBuilder::new(
                Box::new(InternalIO {}),
                Box::new(SMRefCounter { sm: w.sm.clone() }),
            );

            for n in nodes {
                builder.push_value(w, n.key, n.block)?;
            }

            nodes = builder.complete(w)?;
        }

        assert!(nodes.len() == 1);
        Ok(nodes[0].block)
        let nodes = self.leaf_builder.complete(w)?;
        build_btree(w, nodes)
    }
}

//------------------------------------------

// Build a btree from a list of pre-built leaves
pub fn build_btree(w: &mut WriteBatcher, leaves: Vec<NodeSummary>) -> Result<u64> {
    // Now we iterate, adding layers of internal nodes until we end
    // up with a single root.
    let mut nodes = leaves;
    while nodes.len() > 1 {
        let mut builder = NodeBuilder::new(
            Box::new(InternalIO {}),
            Box::new(SMRefCounter::new(w.sm.clone())),
        );

        for n in nodes {
            builder.push_value(w, n.key, n.block)?;
        }

        nodes = builder.complete(w)?;
    }

    assert!(nodes.len() == 1);

    // The root is expected to be referenced by only one parent,
    // hence the ref count is increased before the availability
    // of it's parent.
    let root = nodes[0].block;
    w.sm.lock().unwrap().inc(root, 1)?;

    Ok(root)
}

//------------------------------------------
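With `Builder` renamed to `BTreeBuilder` and the internal-node layering split out into `build_btree`, a caller looks roughly like the following sketch (hypothetical function, reusing the types shown in this diff):

```rust
// Sketch only: push key/value pairs, then complete() returns the root block.
fn build_example(w: &mut WriteBatcher) -> anyhow::Result<u64> {
    let mut builder: BTreeBuilder<u64> = BTreeBuilder::new(Box::new(NoopRC {}));
    for (key, value) in (0..8u64).map(|k| (k, k * 10)) {
        builder.push_value(w, key, value)?;
    }
    // complete() folds the leaves into internal nodes via build_btree().
    builder.complete(w)
}
```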
@@ -24,9 +24,16 @@ pub trait SpaceMap {
        Ok(old == 1)
    }

    /// Finds a block with a zero reference count. Increments the
    /// count.
    /// Finds a block with a zero reference count. Increments the count.
    /// Returns Ok(None) if no free block (ENOSPC)
    /// Returns Err on fatal error
    fn alloc(&mut self) -> Result<Option<u64>>;

    /// Finds a free block within the range
    fn find_free(&mut self, begin: u64, end: u64) -> Result<Option<u64>>;

    /// Returns the position where allocation starts
    fn get_alloc_begin(&self) -> Result<u64>;
}

pub type ASpaceMap = Arc<Mutex<dyn SpaceMap + Sync + Send>>;

@@ -35,7 +42,7 @@ pub type ASpaceMap = Arc<Mutex<dyn SpaceMap + Sync + Send>>;

pub struct CoreSpaceMap<T> {
    nr_allocated: u64,
    first_free: u64,
    alloc_begin: u64,
    counts: Vec<T>,
}

@@ -46,7 +53,7 @@ where
    pub fn new(nr_entries: u64) -> CoreSpaceMap<V> {
        CoreSpaceMap {
            nr_allocated: 0,
            first_free: 0,
            alloc_begin: 0,
            counts: vec![V::default(); nr_entries as usize],
        }
    }

@@ -77,9 +84,6 @@ where
            self.nr_allocated += 1;
        } else if old != V::from(0u8) && v == 0 {
            self.nr_allocated -= 1;
            if b < self.first_free {
                self.first_free = b;
            }
        }

        Ok(old.into())

@@ -99,18 +103,33 @@ where
    }

    fn alloc(&mut self) -> Result<Option<u64>> {
        for b in self.first_free..(self.counts.len() as u64) {
            if self.counts[b as usize] == V::from(0u8) {
                self.counts[b as usize] = V::from(1u8);
                self.first_free = b + 1;
                self.nr_allocated += 1;
                return Ok(Some(b));
        let mut b = self.find_free(self.alloc_begin, self.counts.len() as u64)?;
        if b.is_none() {
            b = self.find_free(0, self.alloc_begin)?;
            if b.is_none() {
                return Ok(None);
            }
        }

        self.first_free = self.counts.len() as u64;
        self.counts[b.unwrap() as usize] = V::from(1u8);
        self.nr_allocated += 1;
        self.alloc_begin = b.unwrap() + 1;

        Ok(b)
    }

    fn find_free(&mut self, begin: u64, end: u64) -> Result<Option<u64>> {
        for b in begin..end {
            if self.counts[b as usize] == V::from(0u8) {
                return Ok(Some(b));
            }
        }
        Ok(None)
    }

    fn get_alloc_begin(&self) -> Result<u64> {
        Ok(self.alloc_begin as u64)
    }
}

pub fn core_sm(nr_entries: u64, max_count: u32) -> Arc<Mutex<dyn SpaceMap + Send + Sync>> {

@@ -133,16 +152,6 @@ pub fn core_sm_without_mutex(nr_entries: u64, max_count: u32) -> Box<dyn SpaceMa
    }
}

// FIXME: replace it by using the Clone trait
pub fn clone_space_map(src: &dyn SpaceMap) -> Result<Box<dyn SpaceMap>> {
    let nr_blocks = src.get_nr_blocks()?;
    let mut dest = Box::new(CoreSpaceMap::<u32>::new(nr_blocks));
    for i in 0..nr_blocks {
        dest.set(i, src.get(i)?)?;
    }
    Ok(dest)
}

//------------------------------------------

// This in core space map can only count to one, useful when walking

@@ -150,7 +159,7 @@ pub fn clone_space_map(src: &dyn SpaceMap) -> Result<Box<dyn SpaceMap>> {
// aren't interested in counting how many times we've visited.
pub struct RestrictedSpaceMap {
    nr_allocated: u64,
    first_free: usize,
    alloc_begin: usize,
    counts: FixedBitSet,
}

@@ -159,7 +168,7 @@ impl RestrictedSpaceMap {
        RestrictedSpaceMap {
            nr_allocated: 0,
            counts: FixedBitSet::with_capacity(nr_entries as usize),
            first_free: 0,
            alloc_begin: 0,
        }
    }
}

@@ -192,9 +201,6 @@ impl SpaceMap for RestrictedSpaceMap {
        } else {
            if old {
                self.nr_allocated -= 1;
                if b < self.first_free as u64 {
                    self.first_free = b as usize;
                }
            }
            self.counts.set(b as usize, false);
        }

@@ -213,17 +219,33 @@ impl SpaceMap for RestrictedSpaceMap {
    }

    fn alloc(&mut self) -> Result<Option<u64>> {
        for b in self.first_free..self.counts.len() {
            if !self.counts.contains(b) {
                self.counts.insert(b);
                self.first_free = b + 1;
                return Ok(Some(b as u64));
        let mut b = self.find_free(self.alloc_begin as u64, self.counts.len() as u64)?;
        if b.is_none() {
            b = self.find_free(0, self.alloc_begin as u64)?;
            if b.is_none() {
                return Ok(None);
            }
        }

        self.first_free = self.counts.len();
        self.counts.insert(b.unwrap() as usize);
        self.nr_allocated += 1;
        self.alloc_begin = b.unwrap() as usize + 1;

        Ok(b)
    }

    fn find_free(&mut self, begin: u64, end: u64) -> Result<Option<u64>> {
        for b in begin..end {
            if !self.counts.contains(b as usize) {
                return Ok(Some(b));
            }
        }
        Ok(None)
    }

    fn get_alloc_begin(&self) -> Result<u64> {
        Ok(self.alloc_begin as u64)
    }
}

//------------------------------------------
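The new `alloc()` implementations in both space maps follow the same wrap-around scan: search from `alloc_begin` to the end, then from the start back to `alloc_begin`. A self-contained model of that policy (plain slices stand in for the real space-map types):

```rust
// Minimal model of the wrap-around allocation policy shared by CoreSpaceMap
// and RestrictedSpaceMap; counts[b] == 0 means block b is free.
fn find_free(counts: &[u32], begin: usize, end: usize) -> Option<usize> {
    (begin..end).find(|&b| counts[b] == 0)
}

fn alloc(counts: &mut [u32], alloc_begin: &mut usize) -> Option<usize> {
    // Scan from the last allocation point, then wrap around to the start.
    let b = find_free(counts, *alloc_begin, counts.len())
        .or_else(|| find_free(counts, 0, *alloc_begin))?;
    counts[b] = 1;
    *alloc_begin = b + 1;
    Some(b)
}
```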
@@ -190,14 +190,6 @@ fn gather_metadata_index_entries(
) -> Result<Vec<IndexEntry>> {
    let b = engine.read(bitmap_root)?;
    let entries = unpack::<MetadataIndex>(b.get_data())?.indexes;

    // Filter out unused entries with block 0
    let entries: Vec<IndexEntry> = entries
        .iter()
        .take_while(|e| e.blocknr != 0)
        .cloned()
        .collect();

    metadata_sm.lock().unwrap().inc(bitmap_root, 1)?;
    inc_entries(&metadata_sm, &entries[0..])?;
@@ -111,8 +111,8 @@ impl Pack for Bitmap {
    fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
        use BitmapEntry::*;

        out.write_u32::<LittleEndian>(0)?;
        out.write_u32::<LittleEndian>(0)?;
        out.write_u32::<LittleEndian>(0)?; // csum
        out.write_u32::<LittleEndian>(0)?; // padding
        out.write_u64::<LittleEndian>(self.blocknr)?;

        for chunk in self.entries.chunks(32) {

@@ -135,6 +135,7 @@ impl Pack for Bitmap {
                }
            }
        }
        w >>= 64 - chunk.len() * 2;

        u64::pack(&w, out)?;
    }

@@ -200,18 +201,21 @@ pub fn write_common(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<(Vec<Inde
    use BitmapEntry::*;

    let mut index_entries = Vec::new();
    let mut overflow_builder: Builder<u32> = Builder::new(Box::new(NoopRC {}));
    let mut overflow_builder: BTreeBuilder<u32> = BTreeBuilder::new(Box::new(NoopRC {}));

    // how many bitmaps do we need?
    for bm in 0..div_up(sm.get_nr_blocks()? as usize, ENTRIES_PER_BITMAP) {
    let nr_blocks = sm.get_nr_blocks()?;
    let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;

    for bm in 0..nr_bitmaps {
        let begin = bm as u64 * ENTRIES_PER_BITMAP as u64;
        let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
        let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
        let mut first_free: Option<u32> = None;
        let mut nr_free: u32 = 0;
        for i in 0..ENTRIES_PER_BITMAP {
            let b: u64 = ((bm * ENTRIES_PER_BITMAP) as u64) + i as u64;
            if b >= sm.get_nr_blocks()? {
                break;
            }

        for i in 0..len {
            let b = begin + i;
            let rc = sm.get(b)?;
            let e = match rc {
                0 => {

@@ -231,21 +235,13 @@ pub fn write_common(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<(Vec<Inde
            entries.push(e);
        }

        // allocate a new block
        let b = w.alloc()?;
        let mut cursor = Cursor::new(b.get_data());
        let blocknr = write_bitmap(w, entries)?;

        // write the bitmap to it
        let blocknr = b.loc;
        let bitmap = Bitmap { blocknr, entries };
        bitmap.pack(&mut cursor)?;
        w.write(b, checksum::BT::BITMAP)?;

        // Insert into the index tree
        // Insert into the index list
        let ie = IndexEntry {
            blocknr,
            nr_free,
            none_free_before: first_free.unwrap_or(ENTRIES_PER_BITMAP as u32),
            none_free_before: first_free.unwrap_or(len as u32),
        };
        index_entries.push(ie);
    }

@@ -254,4 +250,102 @@ pub fn write_common(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<(Vec<Inde
    Ok((index_entries, ref_count_root))
}

pub fn write_metadata_common(w: &mut WriteBatcher) -> Result<(Vec<IndexEntry>, u64)> {
    use BitmapEntry::*;

    let mut index_entries = Vec::new();
    let mut overflow_builder: BTreeBuilder<u32> = BTreeBuilder::new(Box::new(NoopRC {}));

    // how many bitmaps do we need?
    let nr_blocks = w.sm.lock().unwrap().get_nr_blocks()?;
    let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;

    // how many blocks are allocated or reserved so far?
    let reserved = w.get_reserved_range();
    if reserved.end < reserved.start {
        return Err(anyhow!("unsupported allocation pattern"));
    }
    let nr_used_bitmaps = div_up(reserved.end, ENTRIES_PER_BITMAP as u64) as usize;

    for bm in 0..nr_used_bitmaps {
        let begin = bm as u64 * ENTRIES_PER_BITMAP as u64;
        let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
        let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
        let mut first_free: Option<u32> = None;

        // blocks beyond the limit won't be checked right now, thus are marked as freed
        let limit = std::cmp::min(reserved.end - begin, ENTRIES_PER_BITMAP as u64);
        let mut nr_free: u32 = (len - limit) as u32;

        for i in 0..limit {
            let b = begin + i;
            let rc = w.sm.lock().unwrap().get(b)?;
            let e = match rc {
                0 => {
                    nr_free += 1;
                    if first_free.is_none() {
                        first_free = Some(i as u32);
                    }
                    Small(0)
                }
                1 => Small(1),
                2 => Small(2),
                _ => {
                    overflow_builder.push_value(w, b as u64, rc)?;
                    Overflow
                }
            };
            entries.push(e);
        }

        // Fill unused entries with zeros
        if limit < len {
            entries.resize_with(len as usize, || BitmapEntry::Small(0));
        }

        let blocknr = write_bitmap(w, entries)?;

        // Insert into the index list
        let ie = IndexEntry {
            blocknr,
            nr_free,
            none_free_before: first_free.unwrap_or(limit as u32),
        };
        index_entries.push(ie);
    }

    // Fill the rest of the bitmaps with zeros
    for bm in nr_used_bitmaps..nr_bitmaps {
        let begin = bm as u64 * ENTRIES_PER_BITMAP as u64;
        let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
        let entries = vec![BitmapEntry::Small(0); ENTRIES_PER_BITMAP];
        let blocknr = write_bitmap(w, entries)?;

        // Insert into the index list
        let ie = IndexEntry {
            blocknr,
            nr_free: len as u32,
            none_free_before: 0,
        };
        index_entries.push(ie);
    }

    let ref_count_root = overflow_builder.complete(w)?;
    Ok((index_entries, ref_count_root))
}

fn write_bitmap(w: &mut WriteBatcher, entries: Vec<BitmapEntry>) -> Result<u64> {
    // allocate a new block
    let b = w.alloc_zeroed()?;
    let mut cursor = Cursor::new(b.get_data());

    // write the bitmap to it
    let blocknr = b.loc;
    let bitmap = Bitmap { blocknr, entries };
    bitmap.pack(&mut cursor)?;
    w.write(b, checksum::BT::BITMAP)?;

    Ok(blocknr)
}

//------------------------------------------
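`write_metadata_common` sizes its work from two numbers: the total bitmap count and the count actually touched by the reserved range. A small worked example of the `div_up` arithmetic (the entries-per-bitmap value is illustrative, not the crate's constant):

```rust
// Toy numbers showing how the bitmap counts are derived.
fn div_up(v: u64, divisor: u64) -> u64 {
    (v + divisor - 1) / divisor
}

fn main() {
    let entries_per_bitmap = 32_768u64; // illustrative
    let nr_blocks = 100_000u64;
    let reserved_end = 40_000u64;

    let nr_bitmaps = div_up(nr_blocks, entries_per_bitmap);          // 4 bitmaps in total
    let nr_used_bitmaps = div_up(reserved_end, entries_per_bitmap);  // only 2 carry real counts
    assert_eq!((nr_bitmaps, nr_used_bitmaps), (4, 2));
}
```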
@@ -10,7 +10,7 @@ use crate::write_batcher::*;
pub fn write_disk_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot> {
    let (index_entries, ref_count_root) = write_common(w, sm)?;

    let mut index_builder: Builder<IndexEntry> = Builder::new(Box::new(NoopRC {}));
    let mut index_builder: BTreeBuilder<IndexEntry> = BTreeBuilder::new(Box::new(NoopRC {}));
    for (i, ie) in index_entries.iter().enumerate() {
        index_builder.push_value(w, i as u64, *ie)?;
    }
@@ -1,12 +1,10 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::io::Cursor;

use crate::checksum;
use crate::io_engine::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;

@@ -34,6 +32,13 @@ impl Unpack for MetadataIndex {
        let (i, blocknr) = le_u64(i)?;
        let (i, indexes) = nom::multi::count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;

        // Filter out unused entries
        let indexes: Vec<IndexEntry> = indexes
            .iter()
            .take_while(|e| e.blocknr != 0)
            .cloned()
            .collect();

        Ok((i, MetadataIndex { blocknr, indexes }))
    }
}

@@ -60,27 +65,28 @@ fn block_to_bitmap(b: u64) -> usize {
    (b / ENTRIES_PER_BITMAP as u64) as usize
}

fn adjust_counts(w: &mut WriteBatcher, ie: &IndexEntry, allocs: &[u64]) -> Result<IndexEntry> {
fn adjust_counts(
    w: &mut WriteBatcher,
    ie: &IndexEntry,
    begin: u64,
    end: u64,
) -> Result<IndexEntry> {
    use BitmapEntry::*;

    let mut first_free = ie.none_free_before;
    let mut nr_free = ie.nr_free - allocs.len() as u32;
    let nr_free = ie.nr_free - (end - begin) as u32;

    // Read the bitmap
    let bitmap_block = w.engine.read(ie.blocknr)?;
    let (_, mut bitmap) = Bitmap::unpack(bitmap_block.get_data())?;

    // Update all the entries
    for a in allocs {
        if first_free == *a as u32 {
            first_free = *a as u32 + 1;
    for a in begin..end {
        if first_free == a as u32 {
            first_free = a as u32 + 1;
        }

        if bitmap.entries[*a as usize] == Small(0) {
            nr_free -= 1;
        }

        bitmap.entries[*a as usize] = Small(1);
        bitmap.entries[a as usize] = Small(1);
    }

    // Write the bitmap

@@ -96,25 +102,33 @@ fn adjust_counts(w: &mut WriteBatcher, ie: &IndexEntry, allocs: &[u64]) -> Resul
    })
}

pub fn write_metadata_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot> {
    w.clear_allocations();
    let (mut indexes, ref_count_root) = write_common(w, sm)?;
pub fn write_metadata_sm(w: &mut WriteBatcher) -> Result<SMRoot> {
    let r1 = w.get_reserved_range();

    let bitmap_root = w.alloc()?;
    let (mut indexes, ref_count_root) = write_metadata_common(w)?;
    let bitmap_root = w.alloc_zeroed()?;

    // Now we need to patch up the counts for the metadata that was used for storing
    // the space map itself. These ref counts all went from 0 to 1.
    let allocations = w.clear_allocations();

    // Sort the allocations by bitmap
    let mut by_bitmap = BTreeMap::new();
    for b in allocations {
        let bitmap = block_to_bitmap(b);
        (*by_bitmap.entry(bitmap).or_insert_with(Vec::new)).push(b % ENTRIES_PER_BITMAP as u64);
    let r2 = w.get_reserved_range();
    if r2.end < r1.end {
        return Err(anyhow!("unsupported allocation pattern"));
    }

    for (bitmap, allocs) in by_bitmap {
        indexes[bitmap] = adjust_counts(w, &indexes[bitmap], &allocs)?;
    let bi_begin = block_to_bitmap(r1.end);
    let bi_end = block_to_bitmap(r2.end) + 1;
    for (bm, ie) in indexes.iter_mut().enumerate().take(bi_end).skip(bi_begin) {
        let begin = if bm == bi_begin {
            r1.end % ENTRIES_PER_BITMAP as u64
        } else {
            0
        };
        let end = if bm == bi_end - 1 {
            r2.end % ENTRIES_PER_BITMAP as u64
        } else {
            ENTRIES_PER_BITMAP as u64
        };
        *ie = adjust_counts(w, ie, begin, end)?
    }

    // Write out the metadata index

@@ -128,6 +142,7 @@ pub fn write_metadata_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRo
    w.write(bitmap_root, checksum::BT::INDEX)?;
    w.flush()?;

    let sm = w.sm.lock().unwrap();
    Ok(SMRoot {
        nr_blocks: sm.get_nr_blocks()?,
        nr_allocated: sm.get_nr_allocated()?,
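The reworked `write_metadata_sm` only patches up the bitmaps covered by blocks allocated between its two snapshots of the reserved range. A sketch of how that range maps onto per-bitmap `(begin, end)` sub-ranges (constant illustrative):

```rust
// Sketch of splitting the reserved block range [r1_end, r2_end) into
// per-bitmap sub-ranges, mirroring the loop in write_metadata_sm.
const ENTRIES_PER_BITMAP: u64 = 32_768; // illustrative constant

fn per_bitmap_ranges(r1_end: u64, r2_end: u64) -> Vec<(usize, u64, u64)> {
    let bi_begin = (r1_end / ENTRIES_PER_BITMAP) as usize;
    let bi_end = (r2_end / ENTRIES_PER_BITMAP) as usize + 1;
    (bi_begin..bi_end)
        .map(|bm| {
            let begin = if bm == bi_begin { r1_end % ENTRIES_PER_BITMAP } else { 0 };
            let end = if bm == bi_end - 1 { r2_end % ENTRIES_PER_BITMAP } else { ENTRIES_PER_BITMAP };
            (bm, begin, end)
        })
        .collect()
}
```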
@@ -1,5 +1,7 @@
use anyhow::{anyhow, Result};
use std::collections::{BTreeMap, BTreeSet};
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::DerefMut;
use std::path::Path;

@@ -145,7 +147,8 @@ impl<'a> NodeVisitor<BlockTime> for MappingVisitor<'a> {
const MAX_CONCURRENT_IO: u32 = 1024;

pub struct ThinDumpOptions<'a> {
    pub dev: &'a Path,
    pub input: &'a Path,
    pub output: Option<&'a Path>,
    pub async_io: bool,
    pub report: Arc<Report>,
}

@@ -159,10 +162,10 @@ fn mk_context(opts: &ThinDumpOptions) -> Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync>;

    if opts.async_io {
        engine = Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, false)?);
        engine = Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?);
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads, false)?);
        engine = Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?);
    }

    Ok(Context {

@@ -554,9 +557,9 @@ fn emit_entries<W: Write>(
    Ok(())
}

fn dump_metadata(ctx: &Context, sb: &Superblock, md: &Metadata) -> Result<()> {
fn dump_metadata(ctx: &Context, w: &mut dyn Write, sb: &Superblock, md: &Metadata) -> Result<()> {
    let data_root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
    let mut out = xml::XmlWriter::new(std::io::stdout());
    let mut out = xml::XmlWriter::new(w);
    let xml_sb = xml::Superblock {
        uuid: "".to_string(),
        time: sb.time as u64,

@@ -590,6 +593,7 @@ fn dump_metadata(ctx: &Context, sb: &Superblock, md: &Metadata) -> Result<()> {
        out.device_e()?;
    }
    out.superblock_e()?;
    out.eof()?;

    Ok(())
}

@@ -601,11 +605,18 @@ pub fn dump(opts: ThinDumpOptions) -> Result<()> {
    let sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;
    let md = build_metadata(&ctx, &sb)?;

    let mut writer: Box<dyn Write>;
    if opts.output.is_some() {
        writer = Box::new(BufWriter::new(File::create(opts.output.unwrap())?));
    } else {
        writer = Box::new(BufWriter::new(std::io::stdout()));
    }

    ctx.report
        .set_title("Optimising metadata to improve leaf packing");
    let md = optimise_metadata(md)?;

    dump_metadata(&ctx, &sb, &md)
    dump_metadata(&ctx, &mut writer, &sb, &md)
}

//------------------------------------------
@@ -57,44 +57,47 @@ impl std::fmt::Display for MappedSection {

//------------------------------------------

struct Pass1Result {
struct RestoreResult {
    sb: xml::Superblock,
    devices: BTreeMap<u32, (DeviceDetail, Vec<NodeSummary>)>,
    devices: BTreeMap<u32, (DeviceDetail, u64)>,
    data_sm: Arc<Mutex<dyn SpaceMap>>,
}

struct Pass1<'a> {
struct Restorer<'a> {
    w: &'a mut WriteBatcher,
    report: Arc<Report>,

    current_dev: Option<DeviceDetail>,
    // Shared leaves built from the <def> tags
    sub_trees: BTreeMap<String, Vec<NodeSummary>>,

    // The builder for the current shared sub tree or device
    map: Option<(MappedSection, NodeBuilder<BlockTime>)>,
    current_map: Option<(MappedSection, NodeBuilder<BlockTime>)>,
    current_dev: Option<DeviceDetail>,

    sb: Option<xml::Superblock>,
    devices: BTreeMap<u32, (DeviceDetail, Vec<NodeSummary>)>,
    devices: BTreeMap<u32, (DeviceDetail, u64)>,
    data_sm: Option<Arc<Mutex<dyn SpaceMap>>>,
}

impl<'a> Pass1<'a> {
    fn new(w: &'a mut WriteBatcher) -> Self {
        Pass1 {
impl<'a> Restorer<'a> {
    fn new(w: &'a mut WriteBatcher, report: Arc<Report>) -> Self {
        Restorer {
            w,
            current_dev: None,
            report,
            sub_trees: BTreeMap::new(),
            map: None,
            current_map: None,
            current_dev: None,
            sb: None,
            devices: BTreeMap::new(),
            data_sm: None,
        }
    }

    fn get_result(self) -> Result<Pass1Result> {
    fn get_result(self) -> Result<RestoreResult> {
        if self.sb.is_none() {
            return Err(anyhow!("No superblock found in xml file"));
        }
        Ok(Pass1Result {
        Ok(RestoreResult {
            sb: self.sb.unwrap(),
            devices: self.devices,
            data_sm: self.data_sm.unwrap(),

@@ -102,7 +105,7 @@ impl<'a> Pass1<'a> {
    }

    fn begin_section(&mut self, section: MappedSection) -> Result<Visit> {
        if let Some((outer, _)) = self.map.as_ref() {
        if let Some((outer, _)) = self.current_map.as_ref() {
            let msg = format!(
                "Nested subtrees are not allowed '{}' within '{}'",
                section, outer

@@ -115,24 +118,24 @@ impl<'a> Pass1<'a> {
        });
        let leaf_builder = NodeBuilder::new(Box::new(LeafIO {}), value_rc);

        self.map = Some((section, leaf_builder));
        self.current_map = Some((section, leaf_builder));
        Ok(Visit::Continue)
    }

    fn end_section(&mut self) -> Result<(MappedSection, Vec<NodeSummary>)> {
        let mut current = None;
        std::mem::swap(&mut self.map, &mut current);
        std::mem::swap(&mut self.current_map, &mut current);

        if let Some((name, nodes)) = current {
            Ok((name, nodes.complete(self.w)?))
        } else {
            let msg = "Unbalanced </def> tag".to_string();
            let msg = "Unbalanced </def> or </device> tag".to_string();
            Err(anyhow!(msg))
        }
    }
}

impl<'a> MetadataVisitor for Pass1<'a> {
impl<'a> MetadataVisitor for Restorer<'a> {
    fn superblock_b(&mut self, sb: &xml::Superblock) -> Result<Visit> {
        self.sb = Some(sb.clone());
        self.data_sm = Some(core_sm(sb.nr_data_blocks, u32::MAX));

@@ -158,6 +161,8 @@ impl<'a> MetadataVisitor for Pass1<'a> {
    }

    fn device_b(&mut self, d: &Device) -> Result<Visit> {
        self.report
            .info(&format!("building btree for device {}", d.dev_id));
        self.current_dev = Some(DeviceDetail {
            mapped_blocks: d.mapped_blocks,
            transaction_id: d.transaction,

@@ -170,7 +175,8 @@ impl<'a> MetadataVisitor for Pass1<'a> {
    fn device_e(&mut self) -> Result<Visit> {
        if let Some(detail) = self.current_dev.take() {
            if let (MappedSection::Dev(thin_id), nodes) = self.end_section()? {
                self.devices.insert(thin_id, (detail, nodes));
                let root = build_btree(self.w, nodes)?;
                self.devices.insert(thin_id, (detail, root));
                Ok(Visit::Continue)
            } else {
                Err(anyhow!("internal error, couldn't find device details"))

@@ -181,13 +187,12 @@ impl<'a> MetadataVisitor for Pass1<'a> {
    }

    fn map(&mut self, m: &Map) -> Result<Visit> {
        if let Some((_name, _builder)) = self.map.as_mut() {
        if let Some((_, builder)) = self.current_map.as_mut() {
            for i in 0..m.len {
                let bt = BlockTime {
                    block: m.data_begin + i,
                    time: m.time,
                };
                let (_, builder) = self.map.as_mut().unwrap();
                builder.push_value(self.w, m.thin_begin + i, bt)?;
            }
            Ok(Visit::Continue)

@@ -206,7 +211,7 @@ impl<'a> MetadataVisitor for Pass1<'a> {

        if let Some(leaves) = self.sub_trees.get(name) {
            // We could be in a <def> or <device>
            if let Some((_name, builder)) = self.map.as_mut() {
            if let Some((_name, builder)) = self.current_map.as_mut() {
                builder.push_nodes(self.w, leaves)?;
            } else {
                let msg = format!(

@@ -246,8 +251,7 @@ fn build_data_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<Vec<u8>> {
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
    let mut sm_root = vec![0u8; SPACE_MAP_ROOT_SIZE];
    let mut cur = Cursor::new(&mut sm_root);
    let sm_without_meta = clone_space_map(w.sm.lock().unwrap().deref())?;
    let r = write_metadata_sm(w, sm_without_meta.deref())?;
    let r = write_metadata_sm(w)?;
    r.pack(&mut cur)?;

    Ok(sm_root)

@@ -298,39 +302,23 @@ pub fn restore(opts: ThinRestoreOptions) -> Result<()> {

    let sm = core_sm(ctx.engine.get_nr_blocks(), max_count);
    let mut w = WriteBatcher::new(ctx.engine.clone(), sm.clone(), ctx.engine.get_batch_size());
    let mut pass = Pass1::new(&mut w);
    xml::read(input, &mut pass)?;
    let pass = pass.get_result()?;
    let mut restorer = Restorer::new(&mut w, ctx.report.clone());
    xml::read(input, &mut restorer)?;
    let result = restorer.get_result()?;

    // Build the device details tree.
    let mut details_builder: Builder<DeviceDetail> = Builder::new(Box::new(NoopRC {}));
    for (thin_id, (detail, _)) in &pass.devices {
    // Build the device details and top level mapping tree
    let mut details_builder: BTreeBuilder<DeviceDetail> = BTreeBuilder::new(Box::new(NoopRC {}));
    let mut dev_builder: BTreeBuilder<u64> = BTreeBuilder::new(Box::new(NoopRC {}));
    for (thin_id, (detail, root)) in &result.devices {
        details_builder.push_value(&mut w, *thin_id as u64, *detail)?;
        dev_builder.push_value(&mut w, *thin_id as u64, *root)?;
    }
    let details_root = details_builder.complete(&mut w)?;

    // Build the individual mapping trees that make up the bottom layer.
    let mut devs: BTreeMap<u32, u64> = BTreeMap::new();
    for (thin_id, (_, nodes)) in &pass.devices {
        ctx.report
            .info(&format!("building btree for device {}", thin_id));
        let mut builder: Builder<BlockTime> = Builder::new(Box::new(NoopRC {}));
        builder.push_leaves(&mut w, nodes)?;
        let root = builder.complete(&mut w)?;
        devs.insert(*thin_id, root);
    }

    // Build the top level mapping tree
    let mut builder: Builder<u64> = Builder::new(Box::new(NoopRC {}));
    for (thin_id, root) in devs {
        builder.push_value(&mut w, thin_id as u64, root)?;
    }
    let mapping_root = builder.complete(&mut w)?;
    let mapping_root = dev_builder.complete(&mut w)?;

    // Build data space map
    let data_sm_root = build_data_sm(&mut w, pass.data_sm.lock().unwrap().deref())?;
    let data_sm_root = build_data_sm(&mut w, result.data_sm.lock().unwrap().deref())?;

    // FIXME: I think we need to decrement the shared leaves
    // Build metadata space map
    let metadata_sm_root = build_metadata_sm(&mut w)?;

@@ -339,14 +327,14 @@ pub fn restore(opts: ThinRestoreOptions) -> Result<()> {
        flags: SuperblockFlags { needs_check: false },
        block: SUPERBLOCK_LOCATION,
        version: 2,
        time: pass.sb.time as u32,
        transaction_id: pass.sb.transaction,
        time: result.sb.time as u32,
        transaction_id: result.sb.transaction,
        metadata_snap: 0,
        data_sm_root,
        metadata_sm_root,
        mapping_root,
        details_root,
        data_block_size: pass.sb.data_block_size,
        data_block_size: result.sb.data_block_size,
        nr_metadata_blocks: ctx.engine.get_nr_blocks(),
    };
    write_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;
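After the single-pass `Restorer` has produced a `(DeviceDetail, root)` pair per thin id, the restore path only has to fold those pairs into the details tree and the top-level mapping tree. A hypothetical helper showing that fold, reusing the types from this diff:

```rust
// Sketch only: fold the per-device results into the two top-level btrees.
use std::collections::BTreeMap;

fn build_top_level(
    w: &mut WriteBatcher,
    devices: &BTreeMap<u32, (DeviceDetail, u64)>,
) -> anyhow::Result<(u64, u64)> {
    let mut details: BTreeBuilder<DeviceDetail> = BTreeBuilder::new(Box::new(NoopRC {}));
    let mut mappings: BTreeBuilder<u64> = BTreeBuilder::new(Box::new(NoopRC {}));
    for (thin_id, (detail, root)) in devices {
        details.push_value(w, *thin_id as u64, *detail)?;
        mappings.push_value(w, *thin_id as u64, *root)?;
    }
    // (details_root, mapping_root) for the superblock
    Ok((details.complete(w)?, mappings.complete(w)?))
}
```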
@@ -1,5 +1,5 @@
use anyhow::{anyhow, Result};
use std::collections::BTreeSet;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};

use crate::checksum;

@@ -17,7 +17,30 @@ pub struct WriteBatcher {

    batch_size: usize,
    queue: Vec<Block>,
    allocations: BTreeSet<u64>,

    // The reserved range covers all the blocks allocated or reserved by this
    // WriteBatcher, and the blocks already occupied. No blocks in this range
    // are expected to be freed, hence a single range is used for the representation.
    reserved: std::ops::Range<u64>,
}

pub fn find_free(sm: &mut dyn SpaceMap, reserved: &std::ops::Range<u64>) -> Result<u64> {
    let nr_blocks = sm.get_nr_blocks()?;
    let mut b;
    if reserved.end >= reserved.start {
        b = sm.find_free(reserved.end, nr_blocks)?;
        if b.is_none() {
            b = sm.find_free(0, reserved.start)?;
        }
    } else {
        b = sm.find_free(reserved.end, reserved.start)?;
    }

    if b.is_none() {
        return Err(anyhow!("out of metadata space"));
    }

    Ok(b.unwrap())
}

impl WriteBatcher {

@@ -26,31 +49,61 @@ impl WriteBatcher {
        sm: Arc<Mutex<dyn SpaceMap>>,
        batch_size: usize,
    ) -> WriteBatcher {
        let alloc_begin = sm.lock().unwrap().get_alloc_begin().unwrap_or(0);

        WriteBatcher {
            engine,
            sm,
            batch_size,
            queue: Vec::with_capacity(batch_size),
            allocations: BTreeSet::new(),
            reserved: std::ops::Range {
                start: alloc_begin,
                end: alloc_begin,
            },
        }
    }

    pub fn alloc(&mut self) -> Result<Block> {
        let mut sm = self.sm.lock().unwrap();
        let b = sm.alloc()?;
        let b = find_free(sm.deref_mut(), &self.reserved)?;
        self.reserved.end = b + 1;

        if b.is_none() {
            return Err(anyhow!("out of metadata space"));
        }
        sm.set(b, 1)?;

        self.allocations.insert(b.unwrap());
        Ok(Block::new(b.unwrap()))
        Ok(Block::new(b))
    }

    pub fn clear_allocations(&mut self) -> BTreeSet<u64> {
        let mut tmp = BTreeSet::new();
        std::mem::swap(&mut tmp, &mut self.allocations);
        tmp
    pub fn alloc_zeroed(&mut self) -> Result<Block> {
        let mut sm = self.sm.lock().unwrap();
        let b = find_free(sm.deref_mut(), &self.reserved)?;
        self.reserved.end = b + 1;

        sm.set(b, 1)?;

        Ok(Block::zeroed(b))
    }

    pub fn reserve(&mut self) -> Result<Block> {
        let mut sm = self.sm.lock().unwrap();
        let b = find_free(sm.deref_mut(), &self.reserved)?;
        self.reserved.end = b + 1;

        Ok(Block::new(b))
    }

    pub fn reserve_zeroed(&mut self) -> Result<Block> {
        let mut sm = self.sm.lock().unwrap();
        let b = find_free(sm.deref_mut(), &self.reserved)?;
        self.reserved.end = b + 1;

        Ok(Block::zeroed(b))
    }

    pub fn get_reserved_range(&self) -> std::ops::Range<u64> {
        std::ops::Range {
            start: self.reserved.start,
            end: self.reserved.end,
        }
    }

    pub fn write(&mut self, b: Block, kind: checksum::BT) -> Result<()> {
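The `WriteBatcher` now distinguishes `alloc()` (block ref-counted in the space map immediately) from `reserve()` (block kept out of reuse via the reserved range, with its count written later by the metadata space map writer). A hypothetical illustration of that difference:

```rust
// Sketch only: both calls advance the reserved range, but only alloc()
// bumps the block's reference count straight away.
fn demo(w: &mut WriteBatcher) -> anyhow::Result<()> {
    let a = w.alloc()?;   // ref count set to 1 immediately
    let r = w.reserve()?; // ref count left at 0, counted later

    assert!(w.get_reserved_range().contains(&a.loc));
    assert!(w.get_reserved_range().contains(&r.loc));
    Ok(())
}
```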
@@ -12,14 +12,14 @@ use common::*;
#[test]
fn accepts_v() -> Result<()> {
    let stdout = cache_check!("-V").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

#[test]
fn accepts_version() -> Result<()> {
    let stdout = cache_check!("--version").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

@@ -42,7 +42,7 @@ fn accepts_help() -> Result<()> {
#[test]
fn missing_metadata() -> Result<()> {
    let stderr = run_fail(cache_check!())?;
    assert!(stderr.contains("No input file provided"));
    assert!(stderr.contains(msg::MISSING_INPUT_ARG));
    Ok(())
}

@@ -66,7 +66,7 @@ fn unreadable_metadata() -> Result<()> {
    let md = mk_valid_md(&mut td)?;
    cmd!("chmod", "-r", &md).run()?;
    let stderr = run_fail(cache_check!(&md))?;
    assert!(stderr.contains("syscall 'open' failed: Permission denied"));
    assert!(stderr.contains("Permission denied"));
    Ok(())
}
@@ -18,14 +18,62 @@ use test_dir::TestDir;

//------------------------------------------

#[cfg(not(feature = "rust_tests"))]
pub mod msg {
    pub const FILE_NOT_FOUND: &str = "Couldn't stat file";
    pub const MISSING_INPUT_ARG: &str = "No input file provided";
    pub const MISSING_OUTPUT_ARG: &str = "No output file provided";
}

#[cfg(feature = "rust_tests")]
pub mod msg {
    pub const FILE_NOT_FOUND: &str = "Couldn't find input file";
    pub const MISSING_INPUT_ARG: &str = "The following required arguments were not provided";
    pub const MISSING_OUTPUT_ARG: &str = "The following required arguments were not provided";
}

//------------------------------------------

#[macro_export]
macro_rules! path_to_cpp {
    ($name: literal) => {
        concat!("bin/", $name)
    };
}

#[macro_export]
macro_rules! path_to_rust {
    ($name: literal) => {
        env!(concat!("CARGO_BIN_EXE_", $name))
    };
}

#[cfg(not(feature = "rust_tests"))]
#[macro_export]
macro_rules! path_to {
    ($name: literal) => {
        path_to_cpp!($name)
    };
}

#[cfg(feature = "rust_tests")]
#[macro_export]
macro_rules! path_to {
    ($name: literal) => {
        path_to_rust!($name)
    };
}

// FIXME: write a macro to generate these commands
// Known issue of nested macro definition: https://github.com/rust-lang/rust/issues/35853
// RFC: https://github.com/rust-lang/rfcs/blob/master/text/3086-macro-metavar-expr.md
#[macro_export]
macro_rules! thin_check {
    ( $( $arg: expr ),* ) => {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_check", args).stdout_capture().stderr_capture()
            duct::cmd(path_to!("thin_check"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -36,7 +84,7 @@ macro_rules! thin_restore {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_restore", args).stdout_capture().stderr_capture()
            duct::cmd(path_to!("thin_restore"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -47,7 +95,7 @@ macro_rules! thin_dump {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_dump", args).stdout_capture().stderr_capture()
            duct::cmd(path_to!("thin_dump"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -58,7 +106,7 @@ macro_rules! thin_rmap {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_rmap", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_cpp!("thin_rmap"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -69,7 +117,7 @@ macro_rules! thin_repair {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_repair", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_cpp!("thin_repair"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -80,7 +128,7 @@ macro_rules! thin_delta {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_delta", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_cpp!("thin_delta"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -91,7 +139,7 @@ macro_rules! thin_metadata_pack {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_metadata_pack", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_rust!("thin_metadata_pack"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -102,7 +150,7 @@ macro_rules! thin_metadata_unpack {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_metadata_unpack", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_rust!("thin_metadata_unpack"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -113,7 +161,7 @@ macro_rules! cache_check {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/cache_check", args).stdout_capture().stderr_capture()
            duct::cmd(path_to!("cache_check"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -124,7 +172,7 @@ macro_rules! thin_generate_metadata {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_generate_metadata", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_cpp!("thin_generate_metadata"), args).stdout_capture().stderr_capture()
        }
    };
}

@@ -135,7 +183,7 @@ macro_rules! thin_generate_mappings {
        {
            use std::ffi::OsString;
            let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
            duct::cmd("bin/thin_generate_mappings", args).stdout_capture().stderr_capture()
            duct::cmd(path_to_cpp!("thin_generate_mappings"), args).stdout_capture().stderr_capture()
        }
    };
}
@ -13,14 +13,14 @@ use common::*;
#[test]
fn accepts_v() -> Result<()> {
    let stdout = thin_check!("-V").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

#[test]
fn accepts_version() -> Result<()> {
    let stdout = thin_check!("--version").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

@ -10,14 +10,14 @@ use common::*;
#[test]
fn accepts_v() -> Result<()> {
    let stdout = thin_delta!("-V").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

#[test]
fn accepts_version() -> Result<()> {
    let stdout = thin_delta!("--version").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

@ -65,6 +65,7 @@ fn snap2_unspecified() -> Result<()> {
#[test]
fn dev_unspecified() -> Result<()> {
    let stderr = run_fail(thin_delta!("--snap1", "45", "--snap2", "46"))?;
    assert!(stderr.contains("No input device provided"));
    // TODO: replace with msg::MISSING_INPUT_ARG once the rust version is ready
    assert!(stderr.contains("No input file provided"));
    Ok(())
}

@ -11,14 +11,14 @@ use common::*;
#[test]
fn accepts_v() -> Result<()> {
    let stdout = thin_repair!("-V").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

#[test]
fn accepts_version() -> Result<()> {
    let stdout = thin_repair!("--version").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

@ -53,6 +53,7 @@ fn missing_input_file() -> Result<()> {
    let md = mk_zeroed_md(&mut td)?;
    let stderr = run_fail(thin_repair!("-i", "no-such-file", "-o", &md))?;
    assert!(superblock_all_zeroes(&md)?);
    // TODO: replace with msg::FILE_NOT_FOUND once the rust version is ready
    assert!(stderr.contains("Couldn't stat file"));
    Ok(())
}

@ -72,6 +73,7 @@ fn missing_output_file() -> Result<()> {
    let mut td = TestDir::new()?;
    let md = mk_valid_md(&mut td)?;
    let stderr = run_fail(thin_repair!("-i", &md))?;
    // TODO: replace with msg::MISSING_OUTPUT_ARG once the rust version is ready
    assert!(stderr.contains("No output file provided."));
    Ok(())
}

@ -12,14 +12,14 @@ use common::*;
#[test]
fn accepts_v() -> Result<()> {
    let stdout = thin_restore!("-V").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

#[test]
fn accepts_version() -> Result<()> {
    let stdout = thin_restore!("--version").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

@ -44,7 +44,7 @@ fn no_input_file() -> Result<()> {
    let mut td = TestDir::new()?;
    let md = mk_zeroed_md(&mut td)?;
    let stderr = run_fail(thin_restore!("-o", &md))?;
    assert!(stderr.contains("No input file provided."));
    assert!(stderr.contains(msg::MISSING_INPUT_ARG));
    Ok(())
}

@ -54,7 +54,7 @@ fn missing_input_file() -> Result<()> {
    let md = mk_zeroed_md(&mut td)?;
    let stderr = run_fail(thin_restore!("-i", "no-such-file", "-o", &md))?;
    assert!(superblock_all_zeroes(&md)?);
    assert!(stderr.contains("Couldn't stat file"));
    assert!(stderr.contains(msg::FILE_NOT_FOUND));
    Ok(())
}

@ -73,7 +73,7 @@ fn no_output_file() -> Result<()> {
    let mut td = TestDir::new()?;
    let xml = mk_valid_xml(&mut td)?;
    let stderr = run_fail(thin_restore!("-i", &xml))?;
    assert!(stderr.contains("No output file provided."));
    assert!(stderr.contains(msg::MISSING_OUTPUT_ARG));
    Ok(())
}

@ -10,14 +10,14 @@ use common::*;
#[test]
fn accepts_v() -> Result<()> {
    let stdout = thin_rmap!("-V").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

#[test]
fn accepts_version() -> Result<()> {
    let stdout = thin_rmap!("--version").read()?;
    assert_eq!(stdout, tools_version());
    assert!(stdout.contains(tools_version()));
    Ok(())
}

@ -687,7 +687,7 @@ thin_delta_cmd::run(int argc, char **argv)
    }

    if (argc == optind)
        die("No input device provided.");
        die("No input file provided.");
    else
        fs.dev = argv[optind];