Merge branch 'master' of github.com:jthornber/thin-provisioning-tools

This commit is contained in:
Joe Thornber
2020-04-08 12:30:26 +01:00
23 changed files with 493 additions and 868 deletions

View File

@@ -64,8 +64,6 @@ namespace persistent_data {
read_ref(read_ref const &rhs);
virtual ~read_ref();
read_ref const &operator =(read_ref const &rhs);
block_address get_location() const;
void const *data() const;
@@ -82,8 +80,6 @@ namespace persistent_data {
write_ref(write_ref const &rhs);
~write_ref();
write_ref const &operator =(write_ref const &rhs);
using read_ref::data;
void *data();
@@ -136,11 +132,12 @@ namespace persistent_data {
private:
uint64_t choose_cache_size(block_address nr_blocks) const;
int open_or_create_block_file(std::string const &path, off_t file_size,
mode m, bool excl);
file_utils::file_descriptor open_or_create_block_file(std::string const &path,
off_t file_size,
mode m, bool excl);
void check(block_address b) const;
int fd_;
file_utils::file_descriptor fd_;
mutable block_cache bc_;
unsigned superblock_ref_count_;
};

View File

@@ -47,18 +47,6 @@ namespace persistent_data {
b_.put();
}
// Copy-assignment for read_ref: rebinds this handle to the cache block
// held by rhs and bumps that block's hold count via get().
// NOTE(review): the block previously held by *this is not explicitly
// put() here — presumably assignment of b_ releases the old hold;
// confirm against the block_cache block handle semantics.
template <uint32_t BlockSize>
typename block_manager<BlockSize>::read_ref const &
block_manager<BlockSize>::read_ref::operator =(read_ref const &rhs)
{
if (this != &rhs) {
b_ = rhs.b_;
b_.get();
}
return *this;
}
template <uint32_t BlockSize>
block_address
block_manager<BlockSize>::read_ref::get_location() const
@@ -112,18 +100,6 @@ namespace persistent_data {
}
}
// Copy-assignment for write_ref: delegates the block rebinding to the
// read_ref base, then shares rhs's write-lock reference count, bumping
// it when present.
// Fix: the original fell off the end of a function declared to return
// write_ref const & (undefined behaviour) — it must return *this.
template <uint32_t BlockSize>
typename block_manager<BlockSize>::write_ref const &
block_manager<BlockSize>::write_ref::operator =(write_ref const &rhs)
{
	if (&rhs != this) {
		// Rebind the underlying cache block via the base class.
		read_ref::operator =(rhs);

		// Share rhs's lock count; bump it if this ref participates
		// in reference counting (ref_count_ may be null).
		ref_count_ = rhs.ref_count_;
		if (ref_count_)
			(*ref_count_)++;
	}
	return *this;
}
template <uint32_t BlockSize>
void *
block_manager<BlockSize>::write_ref::data()
@@ -154,7 +130,7 @@ namespace persistent_data {
}
template <uint32_t BlockSize>
int
file_utils::file_descriptor
block_manager<BlockSize>::open_or_create_block_file(std::string const &path, off_t file_size, mode m, bool excl)
{
switch (m) {

View File

@@ -16,13 +16,13 @@ damage_tracker::bad_node()
damaged_ = true;
}
maybe_range64
damage_tracker::maybe_run64
damage_tracker::good_internal(block_address begin)
{
maybe_range64 r;
maybe_run64 r;
if (damaged_) {
r = maybe_range64(range64(damage_begin_, begin));
r = maybe_run64(run64(damage_begin_, begin));
damaged_ = false;
}
@@ -30,13 +30,13 @@ damage_tracker::good_internal(block_address begin)
return r;
}
maybe_range64
damage_tracker::good_leaf(uint64_t begin, uint64_t end)
damage_tracker::maybe_run64
damage_tracker::good_leaf(block_address begin, block_address end)
{
maybe_range64 r;
maybe_run64 r;
if (damaged_) {
r = maybe_range64(range64(damage_begin_, begin));
r = maybe_run64(run64(damage_begin_, begin));
damaged_ = false;
}
@@ -44,13 +44,49 @@ damage_tracker::good_leaf(uint64_t begin, uint64_t end)
return r;
}
maybe_range64
damage_tracker::maybe_run64
damage_tracker::end()
{
maybe_run64 r;
if (damaged_)
return maybe_range64(damage_begin_);
r = maybe_run64(damage_begin_);
else
return maybe_range64();
r = maybe_run64();
damaged_ = false;
damage_begin_ = 0;
return r;
}
//----------------------------------------------------------------
// Construct with a single empty path so current_path() is valid from
// the moment the tracker exists.
path_tracker::path_tracker()
{
	btree_path empty;
	paths_.push_back(empty);
}
// Record a newly visited path. Returns the previous path when the tree
// being walked has changed, or NULL when p matches the current path.
btree_path const *
path_tracker::next_path(btree_path const &p)
{
	// Same path as before: nothing to record.
	if (p == current_path())
		return NULL;

	// Retain at most two entries: the old current path and p.
	if (paths_.size() == 2)
		paths_.pop_front();
	paths_.push_back(p);

	// The front is now the path that was current before this call.
	return &paths_.front();
}
// The current path is the most recently recorded one.
btree_path const &
path_tracker::current_path() const
{
	return *paths_.rbegin();
}
//----------------------------------------------------------------

View File

@@ -38,57 +38,20 @@ namespace persistent_data {
// trackers if you have a multilayer tree.
class damage_tracker {
public:
damage_tracker()
: damaged_(false),
damage_begin_(0) {
}
damage_tracker();
typedef run<uint64_t> run64;
typedef boost::optional<run64> maybe_run64;
void bad_node() {
damaged_ = true;
}
void bad_node();
maybe_run64 good_internal(block_address begin) {
maybe_run64 r;
if (damaged_) {
r = maybe_run64(run64(damage_begin_, begin));
damaged_ = false;
}
damage_begin_ = begin;
return r;
}
maybe_run64 good_internal(block_address begin);
// remember 'end' is the one-past-the-end value, so
// take the last key in the leaf and add one.
maybe_run64 good_leaf(block_address begin, block_address end) {
maybe_run64 r;
maybe_run64 good_leaf(block_address begin, block_address end);
if (damaged_) {
r = maybe_run64(run64(damage_begin_, begin));
damaged_ = false;
}
damage_begin_ = end;
return r;
}
maybe_run64 end() {
maybe_run64 r;
if (damaged_)
r = maybe_run64(damage_begin_);
else
r = maybe_run64();
damaged_ = false;
damage_begin_ = 0;
return r;
}
maybe_run64 end();
private:
bool damaged_;
@@ -99,28 +62,12 @@ namespace persistent_data {
// different sub tree (by looking at the btree_path).
class path_tracker {
public:
path_tracker() {
// We push an empty path, to ensure there
// is always a current_path.
paths_.push_back(btree_path());
}
path_tracker();
// returns the old path if the tree has changed.
btree_path const *next_path(btree_path const &p) {
if (p != current_path()) {
if (paths_.size() == 2)
paths_.pop_front();
paths_.push_back(p);
btree_path const *next_path(btree_path const &p);
return &paths_.front();
}
return NULL;
}
btree_path const &current_path() const {
return paths_.back();
}
btree_path const &current_path() const;
private:
std::list<btree_path> paths_;

View File

@@ -153,11 +153,27 @@ namespace {
}
boost::optional<unsigned> find_free(unsigned begin, unsigned end) {
begin = max(begin, ie_.none_free_before_);
if (begin >= end)
return boost::optional<unsigned>();
read_ref rr = tm_.read_lock(ie_.blocknr_, validator_);
void const *bits = bitmap_data(rr);
for (unsigned i = max(begin, ie_.none_free_before_); i < end; i++)
if (__lookup_raw(bits, i) == 0)
return boost::optional<unsigned>(i);
// specify the search range inside the bitmap, in 64-bit unit
le64 const *w = reinterpret_cast<le64 const *>(bits);
le64 const *le64_begin = w + (begin >> 5); // w + div_down(begin, 32)
le64 const *le64_end = w + ((end + 31) >> 5); // w + div_up(end, 32)
for (le64 const *ptr = le64_begin; ptr < le64_end; ptr++) {
// specify the search range among a 64-bit of entries
unsigned entry_begin = (ptr == le64_begin) ? (begin & 0x1F) : 0;
unsigned entry_end = ((ptr == le64_end - 1) && (end & 0x1F)) ?
(end & 0x1F) : 32;
int i;
if ((i = find_free_entry(ptr, entry_begin, entry_end)) >= 0)
return ((ptr - w) << 5) + i;
}
return boost::optional<unsigned>();
}
@@ -198,6 +214,19 @@ namespace {
return result;
}
// Scan one little-endian 64-bit word for the first free entry (entries
// are 2-bit pairs; value 0 == free) within [entry_begin, entry_end).
// Returns the entry index within the word, or -1 if none is free.
int find_free_entry(le64 const* bits, unsigned entry_begin, unsigned entry_end) {
	// Shift the word so the entry at entry_begin sits in the low 2 bits.
	uint64_t word = to_cpu<uint64_t>(*bits) >> (entry_begin * 2);

	for (unsigned i = entry_begin; i < entry_end; i++, word >>= 2) {
		if ((word & 0x3) == 0)
			return i;
	}

	return -1;
}
transaction_manager &tm_;
bcache::validator::ptr validator_;