Merge branch 'master' into space-map-checking

Joe Thornber
2015-04-07 12:18:23 +01:00
168 changed files with 7851 additions and 2250 deletions


@ -19,9 +19,7 @@
#ifndef BLOCK_H
#define BLOCK_H
#include "persistent-data/buffer.h"
#include "persistent-data/cache.h"
#include "persistent-data/lock_tracker.h"
#include "block-cache/block_cache.h"
#include <stdint.h>
#include <map>
@ -36,145 +34,77 @@
//----------------------------------------------------------------
namespace persistent_data {
using namespace bcache;
uint32_t const MD_BLOCK_SIZE = 4096;
typedef uint64_t block_address;
template <uint32_t BlockSize = MD_BLOCK_SIZE>
class block_io : private boost::noncopyable {
public:
typedef boost::shared_ptr<block_io> ptr;
enum mode {
READ_ONLY,
READ_WRITE,
CREATE
};
block_io(std::string const &path, block_address nr_blocks, mode m);
~block_io();
block_address get_nr_blocks() const {
return nr_blocks_;
}
void read_buffer(block_address location, buffer<BlockSize> &buf) const;
void write_buffer(block_address location, buffer<BlockSize> const &buf);
private:
int fd_;
block_address nr_blocks_;
mode mode_;
};
template <uint32_t BlockSize = MD_BLOCK_SIZE>
class block_manager : private boost::noncopyable {
public:
typedef boost::shared_ptr<block_manager> ptr;
enum mode {
READ_ONLY,
READ_WRITE,
CREATE
};
block_manager(std::string const &path,
block_address nr_blocks,
unsigned max_concurrent_locks,
typename block_io<BlockSize>::mode m);
class validator {
public:
typedef boost::shared_ptr<validator> ptr;
virtual ~validator() {}
virtual void check(buffer<BlockSize> const &b, block_address location) const = 0;
virtual void prepare(buffer<BlockSize> &b, block_address location) const = 0;
};
class noop_validator : public validator {
public:
void check(buffer<BlockSize> const &b, block_address location) const {}
void prepare(buffer<BlockSize> &b, block_address location) const {}
};
enum block_type {
BT_SUPERBLOCK,
BT_NORMAL
};
struct block : private boost::noncopyable {
typedef boost::shared_ptr<block> ptr;
block(typename block_io<BlockSize>::ptr io,
block_address location,
block_type bt,
typename validator::ptr v,
bool zero = false);
~block();
void check_read_lockable() const {
// FIXME: finish
}
void check_write_lockable() const {
// FIXME: finish
}
void flush();
void change_validator(typename block_manager<BlockSize>::validator::ptr v,
bool check = true);
typename block_io<BlockSize>::ptr io_;
block_address location_;
std::auto_ptr<buffer<BlockSize> > data_;
typename validator::ptr validator_;
block_type bt_;
bool dirty_;
};
mode m);
class read_ref {
public:
static uint32_t const BLOCK_SIZE = BlockSize;
read_ref(block_manager<BlockSize> const &bm,
typename block::ptr b);
read_ref(block_cache::block &b);
read_ref(read_ref const &rhs);
virtual ~read_ref();
read_ref const &operator =(read_ref const &rhs);
block_address get_location() const;
buffer<BlockSize> const &data() const;
void const *data() const;
protected:
block_manager<BlockSize> const *bm_;
typename block::ptr block_;
unsigned *holders_;
block_cache::block &b_;
};
// Inherited from read_ref, since you can read a block that's write
// locked.
class write_ref : public read_ref {
public:
write_ref(block_manager<BlockSize> const &bm,
typename block::ptr b);
write_ref(block_cache::block &b);
write_ref(block_cache::block &b, unsigned &ref_count);
write_ref(write_ref const &rhs);
~write_ref();
write_ref const &operator =(write_ref const &rhs);
using read_ref::data;
buffer<BlockSize> &data();
void *data();
private:
unsigned *ref_count_;
};
// Locking methods
read_ref
read_lock(block_address location,
typename validator::ptr v =
typename validator::ptr(new noop_validator())) const;
typename validator::ptr(new bcache::noop_validator())) const;
write_ref
write_lock(block_address location,
typename validator::ptr v =
typename validator::ptr(new noop_validator()));
typename validator::ptr(new bcache::noop_validator()));
write_ref
write_lock_zero(block_address location,
typename validator::ptr v =
typename validator::ptr(new noop_validator()));
typename validator::ptr(new bcache::noop_validator()));
// The super block is the one that should be written last.
// Unlocking this block triggers the following events:
@ -188,13 +118,14 @@ namespace persistent_data {
// being unlocked then an exception will be thrown.
write_ref superblock(block_address b,
typename validator::ptr v =
typename validator::ptr(new noop_validator()));
typename validator::ptr(new bcache::noop_validator()));
write_ref superblock_zero(block_address b,
typename validator::ptr v =
typename validator::ptr(new noop_validator()));
typename validator::ptr(new bcache::noop_validator()));
block_address get_nr_blocks() const;
void prefetch(block_address b) const;
void flush() const;
@ -203,34 +134,18 @@ namespace persistent_data {
bool is_locked(block_address b) const;
private:
int open_or_create_block_file(std::string const &path, off_t file_size, mode m);
void check(block_address b) const;
void write_block(typename block::ptr b) const;
enum lock_type {
READ_LOCK,
WRITE_LOCK
};
struct cache_traits {
typedef typename block::ptr value_type;
typedef block_address key_type;
static key_type get_key(value_type const &v) {
return v->location_;
}
};
typename block_io<BlockSize>::ptr io_;
mutable base::cache<cache_traits> cache_;
// FIXME: we need a dirty list as well as a cache
mutable lock_tracker tracker_;
int fd_;
mutable block_cache bc_;
unsigned superblock_ref_count_;
};
// A little utility to help build validators
inline block_manager<>::validator::ptr
mk_validator(block_manager<>::validator *v) {
return block_manager<>::validator::ptr(v);
inline bcache::validator::ptr
mk_validator(bcache::validator *v) {
return bcache::validator::ptr(v);
}
}
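
For orientation before the implementation changes below: this hunk replaces the old buffer-based validator and block_io plumbing with bcache::validator, whose check()/prepare() hooks receive a raw void pointer, and routes all locking through the new block cache. The following is only an illustrative sketch, not part of the commit; my_validator and example() are invented, and the bcache::validator signatures are inferred from the array and btree validators later in this diff.

// Illustrative sketch only -- not part of this commit.
#include "persistent-data/block.h"
#include <cstring>

namespace {
	using namespace persistent_data;

	struct my_validator : public bcache::validator {
		virtual void check(void const *raw, block_address location) const {
			// verify a checksum / magic number; throw on mismatch
		}

		virtual void prepare(void *raw, block_address location) const {
			// fill in checksum / block nr before the block is written
		}
	};

	void example(block_manager<> &bm) {
		bcache::validator::ptr v = mk_validator(new my_validator);

		{
			block_manager<>::read_ref rr = bm.read_lock(0, v);
			void const *bytes = rr.data();	// raw MD_BLOCK_SIZE bytes
			(void) bytes;
		}					// read lock released here

		{
			block_manager<>::write_ref wr = bm.write_lock_zero(1, v);
			std::memset(wr.data(), 0, MD_BLOCK_SIZE);	// redundant: GF_ZERO already zeroed it
		}
		bm.flush();
	}
}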


@ -18,13 +18,14 @@
#include "block.h"
#include "base/error_string.h"
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <boost/bind.hpp>
#include <stdexcept>
@ -37,18 +38,15 @@ namespace {
using namespace std;
int const DEFAULT_MODE = 0666;
unsigned const SECTOR_SHIFT = 9;
// FIXME: these will slow it down until we start doing async io.
int const OPEN_FLAGS = O_DIRECT | O_SYNC;
int const OPEN_FLAGS = O_DIRECT;
// FIXME: introduce a new exception for this, or at least lift this
// to exception.h
void syscall_failed(char const *call) {
char buffer[128];
char *msg = strerror_r(errno, buffer, sizeof(buffer));
ostringstream out;
out << "syscall '" << call << "' failed: " << msg;
out << "syscall '" << call << "' failed: " << base::error_string(errno);;
throw runtime_error(out.str());
}
@ -84,10 +82,9 @@ namespace {
int fd = open_file(path, O_CREAT | O_RDWR);
// fallocate didn't seem to work
int r = ::lseek(fd, file_size, SEEK_SET);
int r = ::ftruncate(fd, file_size);
if (r < 0)
syscall_failed("lseek");
syscall_failed("ftruncate");
return fd;
}
@ -105,189 +102,22 @@ namespace {
namespace persistent_data {
template <uint32_t BlockSize>
block_io<BlockSize>::block_io(std::string const &path, block_address nr_blocks, mode m)
: nr_blocks_(nr_blocks),
mode_(m)
block_manager<BlockSize>::read_ref::read_ref(block_cache::block &b)
: b_(b)
{
off_t file_size = nr_blocks * BlockSize;
switch (m) {
case READ_ONLY:
fd_ = open_block_file(path, file_size, false);
break;
case READ_WRITE:
fd_ = open_block_file(path, file_size, true);
break;
case CREATE:
fd_ = create_block_file(path, file_size);
break;
default:
throw runtime_error("unsupported mode");
}
}
template <uint32_t BlockSize>
block_io<BlockSize>::~block_io()
{
if (::close(fd_) < 0)
syscall_failed("close");
}
template <uint32_t BlockSize>
void
block_io<BlockSize>::read_buffer(block_address location, buffer<BlockSize> &buffer) const
{
off_t r;
r = ::lseek(fd_, BlockSize * location, SEEK_SET);
if (r == (off_t) -1)
throw std::runtime_error("lseek failed");
ssize_t n;
size_t remaining = BlockSize;
unsigned char *buf = buffer.raw();
do {
n = ::read(fd_, buf, remaining);
if (n > 0) {
remaining -= n;
buf += n;
}
} while (remaining && ((n > 0) || (n == EINTR) || (n == EAGAIN)));
if (n < 0)
throw std::runtime_error("read failed");
}
template <uint32_t BlockSize>
void
block_io<BlockSize>::write_buffer(block_address location, buffer<BlockSize> const &buffer)
{
off_t r;
r = ::lseek(fd_, BlockSize * location, SEEK_SET);
if (r == (off_t) -1)
throw std::runtime_error("lseek failed");
ssize_t n;
size_t remaining = BlockSize;
unsigned char const *buf = buffer.raw();
do {
n = ::write(fd_, buf, remaining);
if (n > 0) {
remaining -= n;
buf += n;
}
} while (remaining && ((n > 0) || (n == EINTR) || (n == EAGAIN)));
if (n < 0) {
std::ostringstream out;
out << "write failed to block " << location
<< ", block size = " << BlockSize
<< ", remaining = " << remaining
<< ", n = " << n
<< ", errno = " << errno
<< ", fd_ = " << fd_
<< std::endl;
throw std::runtime_error(out.str());
}
}
//----------------------------------------------------------------
template <uint32_t BlockSize>
block_manager<BlockSize>::block::block(typename block_io<BlockSize>::ptr io,
block_address location,
block_type bt,
typename validator::ptr v,
bool zero)
: io_(io),
location_(location),
data_(new buffer<BlockSize>()),
validator_(v),
bt_(bt),
dirty_(false)
{
if (zero) {
// FIXME: duplicate memset
memset(data_->raw(), 0, BlockSize);
dirty_ = true; // redundant?
} else {
io_->read_buffer(location_, *data_);
validator_->check(*data_, location_);
}
}
template <uint32_t BlockSize>
block_manager<BlockSize>::block::~block()
{
flush();
}
template <uint32_t BlockSize>
void
block_manager<BlockSize>::block::flush()
{
if (dirty_) {
validator_->prepare(*data_, location_);
io_->write_buffer(location_, *data_);
dirty_ = false;
}
}
template <uint32_t BlockSize>
void
block_manager<BlockSize>::block::change_validator(typename block_manager<BlockSize>::validator::ptr v,
bool check)
{
if (v.get() != validator_.get()) {
if (dirty_)
// It may have already happened, by calling
// this we ensure we're consistent.
validator_->prepare(*data_, location_);
validator_ = v;
if (check)
validator_->check(*data_, location_);
}
}
//----------------------------------------------------------------
template <uint32_t BlockSize>
block_manager<BlockSize>::read_ref::read_ref(block_manager<BlockSize> const &bm,
typename block::ptr b)
: bm_(&bm),
block_(b),
holders_(new unsigned)
{
*holders_ = 1;
}
template <uint32_t BlockSize>
block_manager<BlockSize>::read_ref::read_ref(read_ref const &rhs)
: bm_(rhs.bm_),
block_(rhs.block_),
holders_(rhs.holders_)
: b_(rhs.b_)
{
(*holders_)++;
b_.get();
}
template <uint32_t BlockSize>
block_manager<BlockSize>::read_ref::~read_ref()
{
if (!--(*holders_)) {
if (block_->bt_ == BT_SUPERBLOCK) {
bm_->flush();
bm_->cache_.put(block_);
bm_->flush();
} else
bm_->cache_.put(block_);
bm_->tracker_.unlock(block_->location_);
delete holders_;
}
b_.put();
}
template <uint32_t BlockSize>
@ -295,10 +125,8 @@ namespace persistent_data {
block_manager<BlockSize>::read_ref::operator =(read_ref const &rhs)
{
if (this != &rhs) {
block_ = rhs.block_;
bm_ = rhs.bm_;
holders_ = rhs.holders_;
(*holders_)++;
b_ = rhs.b_;
b_.get();
}
return *this;
@ -308,229 +136,174 @@ namespace persistent_data {
block_address
block_manager<BlockSize>::read_ref::get_location() const
{
return block_->location_;
return b_.get_index();
}
template <uint32_t BlockSize>
buffer<BlockSize> const &
void const *
block_manager<BlockSize>::read_ref::data() const
{
return *block_->data_;
return b_.get_data();
}
//--------------------------------
//--------------------------------
template <uint32_t BlockSize>
block_manager<BlockSize>::write_ref::write_ref(block_manager<BlockSize> const &bm,
typename block::ptr b)
: read_ref(bm, b)
block_manager<BlockSize>::write_ref::write_ref(block_cache::block &b)
: read_ref(b),
ref_count_(NULL)
{
b->dirty_ = true;
}
template <uint32_t BlockSize>
buffer<BlockSize> &
block_manager<BlockSize>::write_ref::write_ref(block_cache::block &b, unsigned &ref_count)
: read_ref(b),
ref_count_(&ref_count) {
if (*ref_count_)
throw std::runtime_error("superblock already locked");
(*ref_count_)++;
}
template <uint32_t BlockSize>
block_manager<BlockSize>::write_ref::write_ref(write_ref const &rhs)
: read_ref(rhs),
ref_count_(rhs.ref_count_) {
if (ref_count_)
(*ref_count_)++;
}
template <uint32_t BlockSize>
block_manager<BlockSize>::write_ref::~write_ref()
{
if (ref_count_) {
if (!*ref_count_)
throw std::runtime_error("write_ref ref_count going below zero");
(*ref_count_)--;
}
}
template <uint32_t BlockSize>
typename block_manager<BlockSize>::write_ref const &
block_manager<BlockSize>::write_ref::operator =(write_ref const &rhs)
{
if (&rhs != this) {
read_ref::operator =(rhs);
ref_count_ = rhs.ref_count_;
if (ref_count_)
(*ref_count_)++;
}
}
template <uint32_t BlockSize>
void *
block_manager<BlockSize>::write_ref::data()
{
return *read_ref::block_->data_;
return read_ref::b_.get_data();
}
//----------------------------------------------------------------
//----------------------------------------------------------------
template <uint32_t BlockSize>
block_manager<BlockSize>::block_manager(std::string const &path,
block_address nr_blocks,
unsigned max_concurrent_blocks,
typename block_io<BlockSize>::mode mode)
: io_(new block_io<BlockSize>(path, nr_blocks, mode)),
cache_(max(64u, max_concurrent_blocks)),
tracker_(0, nr_blocks)
mode m)
: fd_(open_or_create_block_file(path, nr_blocks * BlockSize, m)),
bc_(fd_, BlockSize >> SECTOR_SHIFT, nr_blocks, 1024u * 1024u * 16),
superblock_ref_count_(0)
{
}
template <uint32_t BlockSize>
int
block_manager<BlockSize>::open_or_create_block_file(string const &path, off_t file_size, mode m)
{
switch (m) {
case READ_ONLY:
return open_block_file(path, file_size, false);
case READ_WRITE:
return open_block_file(path, file_size, true);
case CREATE:
return create_block_file(path, file_size);
default:
throw std::runtime_error("unsupported mode");
}
}
template <uint32_t BlockSize>
typename block_manager<BlockSize>::read_ref
block_manager<BlockSize>::read_lock(block_address location,
typename block_manager<BlockSize>::validator::ptr v) const
typename bcache::validator::ptr v) const
{
tracker_.read_lock(location);
try {
check(location);
boost::optional<typename block::ptr> cached_block = cache_.get(location);
if (cached_block) {
typename block::ptr cb = *cached_block;
cb->check_read_lockable();
cb->change_validator(v);
return read_ref(*this, *cached_block);
}
typename block::ptr b(new block(io_, location, BT_NORMAL, v));
cache_.insert(b);
return read_ref(*this, b);
} catch (...) {
tracker_.unlock(location);
throw;
}
block_cache::block &b = bc_.get(location, 0, v);
return read_ref(b);
}
template <uint32_t BlockSize>
typename block_manager<BlockSize>::write_ref
block_manager<BlockSize>::write_lock(block_address location,
typename block_manager<BlockSize>::validator::ptr v)
typename bcache::validator::ptr v)
{
tracker_.write_lock(location);
try {
check(location);
boost::optional<typename block::ptr> cached_block = cache_.get(location);
if (cached_block) {
typename block::ptr cb = *cached_block;
cb->check_write_lockable();
cb->change_validator(v);
return write_ref(*this, *cached_block);
}
typename block::ptr b(new block(io_, location, BT_NORMAL, v));
cache_.insert(b);
return write_ref(*this, b);
} catch (...) {
tracker_.unlock(location);
throw;
}
block_cache::block &b = bc_.get(location, block_cache::GF_DIRTY, v);
return write_ref(b);
}
template <uint32_t BlockSize>
typename block_manager<BlockSize>::write_ref
block_manager<BlockSize>::write_lock_zero(block_address location,
typename block_manager<BlockSize>::validator::ptr v)
typename bcache::validator::ptr v)
{
tracker_.write_lock(location);
try {
check(location);
boost::optional<typename block::ptr> cached_block = cache_.get(location);
if (cached_block) {
typename block::ptr cb = *cached_block;
cb->check_write_lockable();
cb->change_validator(v, false);
memset((*cached_block)->data_->raw(), 0, BlockSize);
return write_ref(*this, *cached_block);
}
typename block::ptr b(new block(io_, location, BT_NORMAL, v, true));
cache_.insert(b);
return write_ref(*this, b);
} catch (...) {
tracker_.unlock(location);
throw;
}
block_cache::block &b = bc_.get(location, block_cache::GF_ZERO, v);
return write_ref(b);
}
template <uint32_t BlockSize>
typename block_manager<BlockSize>::write_ref
block_manager<BlockSize>::superblock(block_address location,
typename block_manager<BlockSize>::validator::ptr v)
typename bcache::validator::ptr v)
{
tracker_.superblock_lock(location);
try {
check(location);
if (bc_.get_nr_locked() > 0)
throw std::runtime_error("attempt to lock superblock while other locks are still held");
boost::optional<typename block::ptr> cached_block = cache_.get(location);
if (cached_block) {
typename block::ptr cb = *cached_block;
cb->check_write_lockable();
cb->bt_ = BT_SUPERBLOCK;
cb->change_validator(v);
return write_ref(*this, *cached_block);
}
typename block::ptr b(new block(io_, location, BT_SUPERBLOCK, v));
cache_.insert(b);
return write_ref(*this, b);
} catch (...) {
tracker_.unlock(location);
throw;
}
block_cache::block &b = bc_.get(location, block_cache::GF_DIRTY | block_cache::GF_BARRIER, v);
return write_ref(b, superblock_ref_count_);
}
template <uint32_t BlockSize>
typename block_manager<BlockSize>::write_ref
block_manager<BlockSize>::superblock_zero(block_address location,
typename block_manager<BlockSize>::validator::ptr v)
typename bcache::validator::ptr v)
{
tracker_.superblock_lock(location);
try {
check(location);
if (bc_.get_nr_locked() > 0)
throw std::runtime_error("attempt to lock superblock while other locks are still held");
boost::optional<typename block::ptr> cached_block = cache_.get(location);
if (cached_block) {
typename block::ptr cb = *cached_block;
cb->check_write_lockable();
cb->bt_ = BT_SUPERBLOCK;
cb->change_validator(v, false);
memset(cb->data_->raw(), 0, BlockSize); // FIXME: add a zero method to buffer
return write_ref(*this, *cached_block);
}
typename block::ptr b(new block(io_, location, BT_SUPERBLOCK, v, true));
cache_.insert(b);
return write_ref(*this, b);
} catch (...) {
tracker_.unlock(location);
throw;
}
}
template <uint32_t BlockSize>
void
block_manager<BlockSize>::check(block_address b) const
{
if (b >= io_->get_nr_blocks())
throw std::runtime_error("block address out of bounds");
block_cache::block &b = bc_.get(location, block_cache::GF_ZERO | block_cache::GF_BARRIER, v);
return write_ref(b, superblock_ref_count_);
}
template <uint32_t BlockSize>
block_address
block_manager<BlockSize>::get_nr_blocks() const
{
return io_->get_nr_blocks();
return bc_.get_nr_blocks();
}
template <uint32_t BlockSize>
void
block_manager<BlockSize>::write_block(typename block::ptr b) const
block_manager<BlockSize>::prefetch(block_address b) const
{
b->flush();
bc_.prefetch(b);
}
template <uint32_t BlockSize>
void
block_manager<BlockSize>::flush() const
{
cache_.iterate_unheld(
boost::bind(&block_manager<BlockSize>::write_block, this, _1));
}
template <uint32_t BlockSize>
bool
block_manager<BlockSize>::is_locked(block_address b) const
{
return tracker_.is_locked(b);
bc_.flush();
}
}
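
Two rules that the deleted lock_tracker used to enforce are now handled directly by the cache: superblock() refuses to run while any other block is locked (bc_.get_nr_locked() > 0), and superblock_ref_count_ ensures only one superblock write_ref exists at a time; GF_BARRIER makes the superblock the last block written when that ref is dropped. A hedged sketch of the calling pattern this implies follows; commit_example() is illustrative and not part of the commit.

// Sketch of the locking discipline encoded by block_manager<>::superblock().
void commit_example(persistent_data::block_manager<> &bm)
{
	using persistent_data::block_manager;

	{
		// Ordinary metadata updates: take write locks, mutate, and make
		// sure every write_ref has been destroyed...
		block_manager<>::write_ref wr = bm.write_lock(5);
		(void) wr;
	}	// ...before this point.

	{
		// Throws "attempt to lock superblock while other locks are still
		// held" if the rule above is broken, and "superblock already
		// locked" if a second superblock ref is requested.
		block_manager<>::write_ref sb = bm.superblock(0);
		// update superblock fields via sb.data() ...
	}	// released last, written with a barrier (GF_BARRIER)

	bm.flush();
}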


@ -1,107 +0,0 @@
// Copyright (C) 2013 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef BUFFER_H
#define BUFFER_H
#include <stdint.h>
// #include <stdlib.h>
#include <malloc.h>
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/static_assert.hpp>
#include <stdexcept>
//----------------------------------------------------------------
namespace persistent_data {
uint32_t const DEFAULT_BUFFER_SIZE = 4096;
// Allocate buffer of Size with Alignment imposed.
//
// Allocation needs to be on the heap in order to provide alignment
// guarantees.
//
// Alignment must be a power of two.
template <uint32_t Size = DEFAULT_BUFFER_SIZE, uint32_t Alignment = 4096>
class buffer : private boost::noncopyable {
public:
BOOST_STATIC_ASSERT((Alignment > 1) & !(Alignment & (Alignment - 1)));
static uint32_t const ALIGNMENT = Alignment;
typedef boost::shared_ptr<buffer> ptr;
typedef boost::shared_ptr<buffer const> const_ptr;
size_t size() const {
return Size;
}
unsigned char &operator[](unsigned index) {
check_index(index);
return data_[index];
}
unsigned char const &operator[](unsigned index) const {
check_index(index);
return data_[index];
}
unsigned char *raw() {
return data_;
}
unsigned char const *raw() const {
return data_;
}
static void *operator new(size_t s) {
// void *r;
// return posix_memalign(&r, Alignment, s) ? NULL : r;
// Allocates size bytes and returns a pointer to the
// allocated memory. The memory address will be a
// multiple of 'Alignment', which must be a power of two
void *mem = memalign(Alignment, s);
if (!mem)
throw std::bad_alloc();
return mem;
}
static void operator delete(void *p) {
free(p);
}
private:
unsigned char data_[Size];
static void check_index(unsigned index) {
if (index >= Size)
throw std::range_error("buffer index out of bounds");
}
};
}
//----------------------------------------------------------------
#endif
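
The deleted buffer<> above exists to hand out heap storage aligned for O_DIRECT I/O via its class-specific operator new. A short usage sketch of the removed interface; buffer_example() is invented for illustration.

// Usage sketch for the removed persistent_data::buffer<> template.
#include <cstring>

void buffer_example()
{
	typedef persistent_data::buffer<4096, 4096> block_buffer;

	// operator new goes through memalign(), so raw() is 4096-byte aligned
	block_buffer::ptr b(new block_buffer);

	std::memset(b->raw(), 0, b->size());	// bulk access via the raw pointer
	(*b)[0] = 0xde;				// bounds-checked element access
	// (*b)[4096] would throw std::range_error
}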


@ -1,284 +0,0 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef CACHE_H
#define CACHE_H
#include "deleter.h"
#include <boost/intrusive/circular_list_algorithms.hpp>
#include <boost/intrusive/rbtree_algorithms.hpp>
#include <boost/optional.hpp>
#include <list>
#include <map>
#include <memory>
#include <stdexcept>
//----------------------------------------------------------------
namespace base {
// ValueTraits needs to define value_type, key_type and a get_key()
// static function. Commonly you will want value_type to be a
// shared_ptr, with any teardown specific stuff in the destructor.
template <typename ValueTraits>
class cache {
public:
typedef typename ValueTraits::value_type value_type;
typedef typename ValueTraits::key_type key_type;
cache(unsigned max_entries);
~cache();
void insert(value_type const &v);
boost::optional<value_type> get(key_type const &k);
void put(value_type const &k);
template <typename T>
void iterate_unheld(T fn) const;
private:
void make_space();
struct value_entry {
// FIXME: this means the cached object must have a
// default constructor also, which is a shame.
// so we can construct the headers.
value_entry()
: ref_count_(1) {
}
explicit value_entry(value_type v)
: ref_count_(1),
v_(v) {
}
struct lru {
lru()
: next_(0),
prev_(0) {
}
value_entry *next_, *prev_;
};
struct lookup {
lookup()
: parent_(0),
left_(0),
right_(0),
color_() {
}
value_entry *parent_, *left_, *right_;
int color_;
};
lru lru_;
lookup lookup_;
unsigned ref_count_;
value_type v_;
};
struct value_ptr_cmp {
bool operator() (value_entry const *lhs, value_entry const *rhs) {
key_type k1 = ValueTraits::get_key(lhs->v_);
key_type k2 = ValueTraits::get_key(rhs->v_);
return k1 < k2;
}
};
struct key_value_ptr_cmp {
bool operator() (key_type const &k1, value_entry const *rhs) {
key_type k2 = ValueTraits::get_key(rhs->v_);
return k1 < k2;
}
bool operator() (value_entry const *lhs, key_type const &k2) {
key_type k1 = ValueTraits::get_key(lhs->v_);
return k1 < k2;
}
};
struct list_node_traits {
typedef value_entry node;
typedef value_entry *node_ptr;
typedef const value_entry *const_node_ptr;
static node_ptr get_next(const_node_ptr n) {
return n->lru_.next_;
}
static void set_next(node_ptr n, node_ptr next) {
n->lru_.next_ = next;
}
static node_ptr get_previous(const_node_ptr n) {
return n->lru_.prev_;
}
static void set_previous(node_ptr n, node_ptr prev) {
n->lru_.prev_ = prev;
}
};
struct rbtree_node_traits {
typedef value_entry node;
typedef value_entry *node_ptr;
typedef const value_entry * const_node_ptr;
typedef int color;
static node_ptr get_parent(const_node_ptr n) {
return n->lookup_.parent_;
}
static void set_parent(node_ptr n, node_ptr parent) {
n->lookup_.parent_ = parent;
}
static node_ptr get_left(const_node_ptr n) {
return n->lookup_.left_;
}
static void set_left(node_ptr n, node_ptr left) {
n->lookup_.left_ = left;
}
static node_ptr get_right(const_node_ptr n) {
return n->lookup_.right_;
}
static void set_right(node_ptr n, node_ptr right) {
n->lookup_.right_ = right;
}
static int get_color(const_node_ptr n) {
return n->lookup_.color_;
}
static void set_color(node_ptr n, color c) {
n->lookup_.color_ = c;
}
static color red() {
return 0;
}
static color black() {
return 1;
}
};
typedef boost::intrusive::circular_list_algorithms<list_node_traits> lru_algo;
typedef boost::intrusive::rbtree_algorithms<rbtree_node_traits> lookup_algo;
unsigned max_entries_;
unsigned current_entries_;
value_entry lru_header_;
value_entry lookup_header_;
};
template <typename ValueTraits>
cache<ValueTraits>::cache(unsigned max_entries)
: max_entries_(max_entries),
current_entries_(0) {
lru_algo::init_header(&lru_header_);
lookup_algo::init_header(&lookup_header_);
}
template <typename ValueTraits>
cache<ValueTraits>::~cache() {
utils::deleter<value_entry> d;
lookup_algo::clear_and_dispose(&lookup_header_, d);
}
template <typename ValueTraits>
void
cache<ValueTraits>::insert(value_type const &v) {
make_space();
std::auto_ptr<value_entry> node(new value_entry(v));
value_ptr_cmp cmp;
lookup_algo::insert_equal(&lookup_header_, &lookup_header_, node.get(), cmp);
node.release();
current_entries_++;
}
template <typename ValueTraits>
boost::optional<typename ValueTraits::value_type>
cache<ValueTraits>::get(key_type const &k) {
key_value_ptr_cmp cmp;
value_entry *node = lookup_algo::find(&lookup_header_, k, cmp);
if (node == &lookup_header_)
return boost::optional<value_type>();
if (!node->ref_count_++)
lru_algo::unlink(node);
return boost::optional<value_type>(node->v_);
}
template <typename ValueTraits>
void
cache<ValueTraits>::put(value_type const &v) {
// FIXME: the lookup will go once we use a proper hook
key_value_ptr_cmp cmp;
key_type k = ValueTraits::get_key(v);
value_entry *node = lookup_algo::find(&lookup_header_, k, cmp);
if (node == &lookup_header_)
throw std::runtime_error("invalid put");
if (node->ref_count_ == 0)
throw std::runtime_error("invalid put");
if (!--node->ref_count_)
lru_algo::link_after(&lru_header_, node);
}
template <typename ValueTraits>
void
cache<ValueTraits>::make_space() {
if (current_entries_ == max_entries_) {
value_entry *node = lru_header_.lru_.prev_;
if (node == &lru_header_)
throw std::runtime_error("cache full");
lru_algo::unlink(node);
lookup_algo::unlink(node);
delete node;
current_entries_--;
}
}
template <typename ValueTraits>
template <typename T>
void
cache<ValueTraits>::iterate_unheld(T fn) const {
value_entry *n = lru_header_.lru_.next_;
while (n != &lru_header_) {
fn(n->v_);
n = n->lru_.next_;
}
}
}
//----------------------------------------------------------------
#endif
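
As the comment at the top of this (also removed) header says, the cache is parameterised by a ValueTraits policy that supplies value_type, key_type and a static get_key(). A minimal illustrative traits class and get/put cycle; thing and thing_traits are invented for the example.

// Illustrative ValueTraits for the removed base::cache<> template.
#include "persistent-data/cache.h"
#include <stdint.h>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>

struct thing {
	explicit thing(uint64_t k) : key(k) {}
	uint64_t key;
};

struct thing_traits {
	typedef boost::shared_ptr<thing> value_type;	// shared_ptr, as the comment suggests
	typedef uint64_t key_type;

	static key_type get_key(value_type const &v) {
		return v->key;
	}
};

void cache_example()
{
	base::cache<thing_traits> c(64);		// holds at most 64 entries

	c.insert(boost::shared_ptr<thing>(new thing(17)));

	boost::optional<thing_traits::value_type> v = c.get(17);	// bumps the ref count
	if (v)
		c.put(*v);				// drops it back onto the LRU list
}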


@ -31,9 +31,9 @@ namespace persistent_data {
namespace array_detail {
uint32_t const ARRAY_CSUM_XOR = 595846735;
struct array_block_validator : public block_manager<>::validator {
virtual void check(buffer<> const &b, block_address location) const {
array_block_disk const *data = reinterpret_cast<array_block_disk const *>(&b);
struct array_block_validator : public bcache::validator {
virtual void check(void const *raw, block_address location) const {
array_block_disk const *data = reinterpret_cast<array_block_disk const *>(raw);
crc32c sum(ARRAY_CSUM_XOR);
sum.append(&data->max_entries, MD_BLOCK_SIZE - sizeof(uint32_t));
if (sum.get_sum() != to_cpu<uint32_t>(data->csum))
@ -43,8 +43,8 @@ namespace persistent_data {
throw checksum_error("bad block nr in array block");
}
virtual void prepare(buffer<> &b, block_address location) const {
array_block_disk *data = reinterpret_cast<array_block_disk *>(&b);
virtual void prepare(void *raw, block_address location) const {
array_block_disk *data = reinterpret_cast<array_block_disk *>(raw);
data->blocknr = to_disk<base::le64, uint64_t>(location);
crc32c sum(ARRAY_CSUM_XOR);
@ -172,7 +172,7 @@ namespace persistent_data {
unsigned visit_array_block(ValueVisitor &vv,
btree_path const &p,
typename block_traits::value_type const &v) const {
rblock rb(tm_->read_lock(v, validator_), rc_);
rblock rb(tm_.read_lock(v, validator_), rc_);
for (uint32_t i = 0; i < rb.nr_entries(); i++)
vv.visit(p[0] * rb.max_entries() + i, rb.get(i));
@ -207,8 +207,6 @@ namespace persistent_data {
unsigned entries_per_block_;
};
typedef typename persistent_data::transaction_manager::ptr tm_ptr;
typedef block_manager<>::write_ref write_ref;
typedef block_manager<>::read_ref read_ref;
@ -219,23 +217,23 @@ namespace persistent_data {
typedef typename ValueTraits::value_type value_type;
typedef typename ValueTraits::ref_counter ref_counter;
array(tm_ptr tm, ref_counter rc)
array(transaction_manager &tm, ref_counter rc)
: tm_(tm),
entries_per_block_(rblock::calc_max_entries()),
nr_entries_(0),
block_rc_(tm->get_sm(), *this),
block_rc_(tm.get_sm(), *this),
block_tree_(tm, block_rc_),
rc_(rc),
validator_(new array_detail::array_block_validator) {
}
array(tm_ptr tm, ref_counter rc,
array(transaction_manager &tm, ref_counter rc,
block_address root,
unsigned nr_entries)
: tm_(tm),
entries_per_block_(rblock::calc_max_entries()),
nr_entries_(nr_entries),
block_rc_(tm->get_sm(), *this),
block_rc_(tm.get_sm(), *this),
block_tree_(tm, root, block_rc_),
rc_(rc),
validator_(new array_detail::array_block_validator) {
@ -378,7 +376,7 @@ namespace persistent_data {
wblock new_ablock(unsigned ablock_index) {
uint64_t key[1] = {ablock_index};
write_ref b = tm_->new_block(validator_);
write_ref b = tm_.new_block(validator_);
block_address location = b.get_location();
wblock wb(b, rc_);
@ -389,13 +387,13 @@ namespace persistent_data {
rblock get_ablock(unsigned ablock_index) const {
block_address addr = lookup_block_address(ablock_index);
return rblock(tm_->read_lock(addr, validator_), rc_);
return rblock(tm_.read_lock(addr, validator_), rc_);
}
wblock shadow_ablock(unsigned ablock_index) {
uint64_t key[1] = {ablock_index};
block_address addr = lookup_block_address(ablock_index);
std::pair<write_ref, bool> p = tm_->shadow(addr, validator_);
std::pair<write_ref, bool> p = tm_.shadow(addr, validator_);
wblock wb = wblock(p.first, rc_);
if (p.second)
@ -407,17 +405,17 @@ namespace persistent_data {
}
void dec_ablock_entries(block_address addr) {
rblock b(tm_->read_lock(addr, validator_), rc_);
rblock b(tm_.read_lock(addr, validator_), rc_);
b.dec_all_entries();
}
tm_ptr tm_;
transaction_manager &tm_;
unsigned entries_per_block_;
unsigned nr_entries_;
block_ref_counter block_rc_;
btree<1, block_traits> block_tree_;
typename ValueTraits::ref_counter rc_;
block_manager<>::validator::ptr validator_;
bcache::validator::ptr validator_;
};
}


@ -19,7 +19,7 @@
#ifndef ARRAY_BLOCK_H
#define ARRAY_BLOCK_H
#include "persistent-data/endian_utils.h"
#include "base/endian_utils.h"
//----------------------------------------------------------------
@ -163,11 +163,11 @@ namespace persistent_data {
}
array_block_disk *get_header() {
return reinterpret_cast<array_block_disk *>(ref_.data().raw());
return reinterpret_cast<array_block_disk *>(ref_.data());
}
array_block_disk const *get_header() const {
return reinterpret_cast<array_block_disk const *>(ref_.data().raw());
return reinterpret_cast<array_block_disk const *>(ref_.data());
}
disk_type &element_at(unsigned int index) {


@ -2,7 +2,6 @@
#include "persistent-data/data-structures/bitset.h"
#include "persistent-data/math_utils.h"
using namespace boost;
using namespace persistent_data;
using namespace persistent_data::bitset_detail;
using namespace std;
@ -12,7 +11,7 @@ using namespace std;
namespace {
struct bitset_traits {
typedef base::le64 disk_type;
typedef uint64_t value_type;
typedef ::uint64_t value_type;
typedef no_op_ref_counter<uint64_t> ref_counter;
static void unpack(disk_type const &disk, value_type &value) {
@ -27,25 +26,31 @@ namespace {
namespace persistent_data {
namespace bitset_detail {
size_t BITS_PER_ULL = 64;
class bitset_impl {
public:
typedef boost::shared_ptr<bitset_impl> ptr;
typedef persistent_data::transaction_manager::ptr tm_ptr;
bitset_impl(tm_ptr tm)
bitset_impl(transaction_manager &tm)
: nr_bits_(0),
array_(tm, rc_) {
}
bitset_impl(tm_ptr tm, block_address root, unsigned nr_bits)
bitset_impl(transaction_manager &tm, block_address root, unsigned nr_bits)
: nr_bits_(nr_bits),
array_(tm, rc_, root, nr_bits) {
array_(tm, rc_, root, div_up<unsigned>(nr_bits, BITS_PER_ULL)) {
}
block_address get_root() const {
return array_.get_root();
}
unsigned get_nr_bits() const {
return nr_bits_;
}
void grow(unsigned new_nr_bits, bool default_value) {
pad_last_block(default_value);
resize_array(new_nr_bits, default_value);
@ -77,7 +82,7 @@ namespace persistent_data {
}
void walk_bitset(bitset_visitor &v) const {
bit_visitor vv(v);
bit_visitor vv(v, nr_bits_);
damage_visitor dv(v);
array_.visit_values(vv, dv);
}
@ -85,18 +90,20 @@ namespace persistent_data {
private:
class bit_visitor {
public:
bit_visitor(bitset_visitor &v)
: v_(v) {
bit_visitor(bitset_visitor &v, unsigned nr_bits)
: v_(v),
nr_bits_(nr_bits) {
}
void visit(uint32_t word_index, uint64_t word) {
uint32_t bit_index = word_index * 64;
for (unsigned bit = 0; bit < 64; bit++, bit_index++)
v_.visit(bit_index, !!(word & (1 << bit)));
for (unsigned bit = 0; bit < 64 && bit_index < nr_bits_; bit++, bit_index++)
v_.visit(bit_index, !!(word & (1ULL << bit)));
}
private:
bitset_visitor &v_;
unsigned nr_bits_;
};
class damage_visitor {
@ -112,11 +119,11 @@ namespace persistent_data {
}
private:
optional<uint32_t> lifted_mult64(optional<uint32_t> const &m) {
boost::optional<uint32_t> lifted_mult64(boost::optional<uint32_t> const &m) {
if (!m)
return m;
return optional<uint32_t>(*m * 64);
return boost::optional<uint32_t>(*m * 64);
}
bitset_visitor &v_;
@ -184,7 +191,7 @@ namespace persistent_data {
if (n >= nr_bits_) {
std::ostringstream str;
str << "bitset index out of bounds ("
<< n << " >= " << nr_bits_ << endl;
<< n << " >= " << nr_bits_ << ")";
throw runtime_error(str.str());
}
}
@ -198,12 +205,12 @@ namespace persistent_data {
//----------------------------------------------------------------
persistent_data::bitset::bitset(tm_ptr tm)
persistent_data::bitset::bitset(transaction_manager &tm)
: impl_(new bitset_impl(tm))
{
}
persistent_data::bitset::bitset(tm_ptr tm, block_address root, unsigned nr_bits)
persistent_data::bitset::bitset(transaction_manager &tm, block_address root, unsigned nr_bits)
: impl_(new bitset_impl(tm, root, nr_bits))
{
}
@ -214,6 +221,12 @@ persistent_data::bitset::get_root() const
return impl_->get_root();
}
unsigned
persistent_data::bitset::get_nr_bits() const
{
return impl_->get_nr_bits();
}
void
persistent_data::bitset::grow(unsigned new_nr_bits, bool default_value)
{


@ -16,8 +16,8 @@
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef BITSET_H
#define BITSET_H
#ifndef PERSISTENT_DATA_DATA_STRUCTURES_BITSET_H
#define PERSISTENT_DATA_DATA_STRUCTURES_BITSET_H
#include "persistent-data/run.h"
@ -49,11 +49,12 @@ namespace persistent_data {
class bitset {
public:
typedef boost::shared_ptr<bitset> ptr;
typedef persistent_data::transaction_manager::ptr tm_ptr;
bitset(tm_ptr tm);
bitset(tm_ptr tm, block_address root, unsigned nr_bits);
bitset(transaction_manager &tm);
bitset(transaction_manager &tm,
block_address root, unsigned nr_bits);
block_address get_root() const;
unsigned get_nr_bits() const;
void grow(unsigned new_nr_bits, bool default_value);
void destroy();


@ -0,0 +1,146 @@
#include "persistent-data/data-structures/bloom_filter.h"
#include <stdexcept>
using namespace persistent_data;
//----------------------------------------------------------------
namespace {
static const uint64_t m1 = 0x9e37fffffffc0001ULL;
static const unsigned bits = 18;
static uint32_t hash1(block_address const &b) {
return (b * m1) >> bits;
}
static uint32_t hash2(block_address const &b) {
uint32_t n = b;
n = n ^ (n >> 16);
n = n * 0x85ebca6bu;
n = n ^ (n >> 13);
n = n * 0xc2b2ae35u;
n = n ^ (n >> 16);
return n;
}
void check_power_of_two(unsigned nr_bits) {
if (nr_bits & (nr_bits - 1))
throw std::runtime_error("bloom filter needs a power of two nr_bits");
}
}
//----------------------------------------------------------------
bloom_filter::bloom_filter(transaction_manager &tm,
unsigned nr_bits, unsigned nr_probes)
: tm_(tm),
bits_(tm),
nr_probes_(nr_probes),
mask_(nr_bits - 1)
{
check_power_of_two(nr_bits);
bits_.grow(nr_bits, false);
}
bloom_filter::bloom_filter(transaction_manager &tm, block_address root,
unsigned nr_bits, unsigned nr_probes)
: tm_(tm),
bits_(tm, root, nr_bits),
nr_probes_(nr_probes),
mask_(nr_bits - 1)
{
check_power_of_two(nr_bits);
}
block_address
bloom_filter::get_root() const
{
return bits_.get_root();
}
bool
bloom_filter::test(uint64_t b)
{
vector<unsigned> probes(nr_probes_);
fill_probes(b, probes);
for (unsigned p = 0; p < nr_probes_; p++)
if (!bits_.get(probes[p]))
return false;
return true;
}
void
bloom_filter::set(uint64_t b)
{
vector<unsigned> probes(nr_probes_);
fill_probes(b, probes);
for (unsigned p = 0; p < nr_probes_; p++)
bits_.set(probes[p], true);
}
void
bloom_filter::flush()
{
bits_.flush();
}
void
bloom_filter::fill_probes(block_address b, vector<unsigned> &probes) const
{
uint32_t h1 = hash1(b) & mask_;
uint32_t h2 = hash2(b) & mask_;
probes[0] = h1;
for (unsigned p = 1; p < nr_probes_; p++) {
h1 = (h1 + h2) & mask_;
h2 = (h2 + p) & mask_;
probes[p] = h1;
}
}
void
bloom_filter::print_debug(ostream &out)
{
print_residency(out);
map<unsigned, unsigned> runs;
for (unsigned i = 0; i < bits_.get_nr_bits();) {
bool v = bits_.get(i);
unsigned run_length = 1;
while (++i < bits_.get_nr_bits() && bits_.get(i) == v)
run_length++;
map<unsigned, unsigned>::iterator it = runs.find(run_length);
if (it != runs.end())
it->second++;
else
runs.insert(make_pair(run_length, 1));
}
{
map<unsigned, unsigned>::const_iterator it;
for (it = runs.begin(); it != runs.end(); ++it)
out << it->first << ": " << it->second << endl;
}
}
void
bloom_filter::print_residency(ostream &out)
{
unsigned count = 0;
for (unsigned i = 0; i < bits_.get_nr_bits(); i++)
if (bits_.get(i))
count++;
out << "residency: " << count << "/" << bits_.get_nr_bits() << endl;
}
//----------------------------------------------------------------
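
fill_probes() above picks nr_probes_ bit positions by double hashing: h1 comes from a multiplicative hash, h2 from a Murmur-style finaliser, and each later probe advances h1 by h2 (itself nudged by the probe index), everything masked to the power-of-two bit count. A standalone sketch of that probe sequence; probes_for() is illustrative and simply mirrors the code above.

// Standalone illustration of the double-hashing probe generation used by
// bloom_filter::fill_probes(); constants mirror the anonymous namespace above.
#include <stdint.h>
#include <vector>

static uint32_t hash1(uint64_t b) {
	uint64_t const m1 = 0x9e37fffffffc0001ULL;
	return (b * m1) >> 18;
}

static uint32_t hash2(uint64_t b) {
	uint32_t n = static_cast<uint32_t>(b);
	n ^= n >> 16;  n *= 0x85ebca6bu;
	n ^= n >> 13;  n *= 0xc2b2ae35u;
	n ^= n >> 16;
	return n;
}

// nr_bits must be a power of two, so (nr_bits - 1) works as a mask.
std::vector<unsigned> probes_for(uint64_t block, unsigned nr_probes, unsigned nr_bits)
{
	uint64_t mask = nr_bits - 1;
	std::vector<unsigned> probes(nr_probes);

	uint32_t h1 = hash1(block) & mask;
	uint32_t h2 = hash2(block) & mask;

	probes[0] = h1;
	for (unsigned p = 1; p < nr_probes; p++) {
		h1 = (h1 + h2) & mask;	// step by the second hash
		h2 = (h2 + p) & mask;	// perturb the step, as in fill_probes()
		probes[p] = h1;
	}
	return probes;
}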


@ -0,0 +1,45 @@
#ifndef PERSISTENT_DATA_DATA_STRUCTURES_BLOOM_FILTER_H
#define PERSISTENT_DATA_DATA_STRUCTURES_BLOOM_FILTER_H
#include "persistent-data/transaction_manager.h"
#include "persistent-data/data-structures/bitset.h"
#include <boost/shared_ptr.hpp>
//----------------------------------------------------------------
namespace persistent_data {
class bloom_filter {
public:
typedef boost::shared_ptr<bloom_filter> ptr;
// nr_bits must be a power of two
bloom_filter(transaction_manager &tm,
unsigned nr_bits, unsigned nr_probes);
bloom_filter(transaction_manager &tm, block_address root,
unsigned nr_bits_power, unsigned nr_probes);
block_address get_root() const;
bool test(uint64_t b); // not const due to caching effects in bitset
void set(uint64_t b);
void flush();
void print_debug(ostream &out);
private:
void print_residency(ostream &out);
void fill_probes(block_address b, vector<unsigned> &probes) const;
transaction_manager &tm_;
persistent_data::bitset bits_;
unsigned nr_probes_;
uint64_t mask_;
};
}
//----------------------------------------------------------------
#endif


@ -19,7 +19,7 @@
#ifndef BTREE_H
#define BTREE_H
#include "persistent-data/endian_utils.h"
#include "base/endian_utils.h"
#include "persistent-data/transaction_manager.h"
#include "persistent-data/data-structures/ref_counter.h"
@ -43,22 +43,6 @@ namespace persistent_data {
space_map::ptr sm_;
};
// FIXME: move to sep file. I don't think it's directly used by
// the btree code.
struct uint64_traits {
typedef base::le64 disk_type;
typedef uint64_t value_type;
typedef no_op_ref_counter<uint64_t> ref_counter;
static void unpack(disk_type const &disk, value_type &value) {
value = base::to_cpu<uint64_t>(disk);
}
static void pack(value_type const &value, disk_type &disk) {
disk = base::to_disk<base::le64>(value);
}
};
struct block_traits {
typedef base::le64 disk_type;
typedef block_address value_type;
@ -179,12 +163,15 @@ namespace persistent_data {
private:
static unsigned calc_max_entries(void);
void check_fits_within_block() const;
void *key_ptr(unsigned i) const;
void *value_ptr(unsigned i) const;
block_address location_;
disk_node *raw_;
mutable bool checked_; // flag indicating we've checked the data fits in the block
};
//------------------------------------------------
@ -197,7 +184,7 @@ namespace persistent_data {
return node_ref<ValueTraits>(
b.get_location(),
reinterpret_cast<disk_node *>(
const_cast<unsigned char *>(b.data().raw())));
const_cast<void *>(b.data())));
}
template <typename ValueTraits>
@ -206,14 +193,13 @@ namespace persistent_data {
{
return node_ref<ValueTraits>(
b.get_location(),
reinterpret_cast<disk_node *>(
const_cast<unsigned char *>(b.data().raw())));
reinterpret_cast<disk_node *>(b.data()));
}
class ro_spine : private boost::noncopyable {
public:
ro_spine(transaction_manager::ptr tm,
block_manager<>::validator::ptr v)
ro_spine(transaction_manager &tm,
bcache::validator::ptr v)
: tm_(tm),
validator_(v) {
}
@ -226,8 +212,8 @@ namespace persistent_data {
}
private:
transaction_manager::ptr tm_;
block_manager<>::validator::ptr validator_;
transaction_manager &tm_;
bcache::validator::ptr validator_;
std::list<block_manager<>::read_ref> spine_;
};
@ -237,8 +223,8 @@ namespace persistent_data {
typedef transaction_manager::write_ref write_ref;
typedef boost::optional<block_address> maybe_block;
shadow_spine(transaction_manager::ptr tm,
block_manager<>::validator::ptr v)
shadow_spine(transaction_manager &tm,
bcache::validator::ptr v)
: tm_(tm),
validator_(v) {
@ -290,8 +276,8 @@ namespace persistent_data {
}
private:
transaction_manager::ptr tm_;
block_manager<>::validator::ptr validator_;
transaction_manager &tm_;
bcache::validator::ptr validator_;
std::list<block_manager<>::write_ref> spine_;
maybe_block root_;
};
@ -349,10 +335,10 @@ namespace persistent_data {
typedef typename btree_detail::node_ref<ValueTraits> leaf_node;
typedef typename btree_detail::node_ref<block_traits> internal_node;
btree(typename persistent_data::transaction_manager::ptr tm,
btree(transaction_manager &tm,
typename ValueTraits::ref_counter rc);
btree(typename transaction_manager::ptr tm,
btree(transaction_manager &tm,
block_address root,
typename ValueTraits::ref_counter rc);
@ -448,12 +434,12 @@ namespace persistent_data {
void inc_children(btree_detail::shadow_spine &spine,
RefCounter &leaf_rc);
typename persistent_data::transaction_manager::ptr tm_;
transaction_manager &tm_;
bool destroy_;
block_address root_;
block_ref_counter internal_rc_;
typename ValueTraits::ref_counter rc_;
typename block_manager<>::validator::ptr validator_;
typename bcache::validator::ptr validator_;
};
};
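
Like array and bitset elsewhere in this commit, btree now borrows the transaction_manager by reference rather than holding a shared_ptr. A rough construction sketch under the new signature follows; example_tree and btree_example() are invented names, and the insert()/lookup()/get_root() calls are assumed from the wider btree interface rather than shown in this hunk.

// Hedged sketch: a single-level btree of uint64 values built against the
// new by-reference transaction_manager interface.
typedef persistent_data::btree<1, persistent_data::uint64_traits> example_tree;

void btree_example(persistent_data::transaction_manager &tm)
{
	persistent_data::uint64_traits::ref_counter rc;
	example_tree tree(tm, rc);		// allocates an empty leaf as the root

	uint64_t key[1] = {42};
	tree.insert(key, 1234);			// assumed interface, not shown in this hunk

	boost::optional<uint64_t> v = tree.lookup(key);
	// v should hold 1234; tree.get_root() can then be recorded in a superblock
}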


@ -32,9 +32,9 @@ namespace {
using namespace btree_detail;
using namespace std;
struct btree_node_validator : public block_manager<>::validator {
virtual void check(buffer<> const &b, block_address location) const {
disk_node const *data = reinterpret_cast<disk_node const *>(&b);
struct btree_node_validator : public bcache::validator {
virtual void check(void const *raw, block_address location) const {
disk_node const *data = reinterpret_cast<disk_node const *>(raw);
node_header const *n = &data->header;
crc32c sum(BTREE_CSUM_XOR);
sum.append(&n->flags, MD_BLOCK_SIZE - sizeof(uint32_t));
@ -45,8 +45,8 @@ namespace {
throw checksum_error("bad block nr in btree node");
}
virtual void prepare(buffer<> &b, block_address location) const {
disk_node *data = reinterpret_cast<disk_node *>(&b);
virtual void prepare(void *raw, block_address location) const {
disk_node *data = reinterpret_cast<disk_node *>(raw);
node_header *n = &data->header;
n->blocknr = to_disk<base::le64, uint64_t>(location);
@ -64,7 +64,7 @@ namespace persistent_data {
inline void
ro_spine::step(block_address b)
{
spine_.push_back(tm_->read_lock(b, validator_));
spine_.push_back(tm_.read_lock(b, validator_));
if (spine_.size() > 2)
spine_.pop_front();
}
@ -72,11 +72,11 @@ namespace persistent_data {
inline bool
shadow_spine::step(block_address b)
{
pair<write_ref, bool> p = tm_->shadow(b, validator_);
pair<write_ref, bool> p = tm_.shadow(b, validator_);
try {
step(p.first);
} catch (...) {
tm_->get_sm()->dec(p.first.get_location());
tm_.get_sm()->dec(p.first.get_location());
throw;
}
return p.second;
@ -87,7 +87,8 @@ namespace persistent_data {
template <typename ValueTraits>
node_ref<ValueTraits>::node_ref(block_address location, disk_node *raw)
: location_(location),
raw_(raw)
raw_(raw),
checked_(false)
{
}
@ -330,6 +331,8 @@ namespace persistent_data {
void *
node_ref<ValueTraits>::key_ptr(unsigned i) const
{
check_fits_within_block();
return raw_->keys + i;
}
@ -337,6 +340,8 @@ namespace persistent_data {
void *
node_ref<ValueTraits>::value_ptr(unsigned i) const
{
check_fits_within_block();
void *value_base = &raw_->keys[to_cpu<uint32_t>(raw_->header.max_entries)];
return static_cast<unsigned char *>(value_base) +
sizeof(typename ValueTraits::disk_type) * i;
@ -357,40 +362,75 @@ namespace persistent_data {
}
}
template <typename ValueTraits>
void
node_ref<ValueTraits>::check_fits_within_block() const {
if (checked_)
return;
if (sizeof(typename ValueTraits::disk_type) != get_value_size()) {
std::ostringstream out;
out << "value size mismatch: expected " << sizeof(typename ValueTraits::disk_type)
<< ", but got " << get_value_size()
<< ". This is not the btree you are looking for." << std::endl;
throw std::runtime_error(out.str());
}
unsigned max = calc_max_entries();
if (max < get_nr_entries()) {
std::ostringstream out;
out << "Bad nr of elements: max per block = "
<< max << ", actual = " << get_nr_entries() << std::endl;
throw std::runtime_error(out.str());
}
checked_ = true;
}
//--------------------------------
template <unsigned Levels, typename ValueTraits>
btree<Levels, ValueTraits>::
btree(typename transaction_manager::ptr tm,
btree(transaction_manager &tm,
typename ValueTraits::ref_counter rc)
: tm_(tm),
destroy_(false),
internal_rc_(tm->get_sm()),
internal_rc_(tm.get_sm()),
rc_(rc),
validator_(new btree_node_validator)
{
using namespace btree_detail;
write_ref root = tm_->new_block(validator_);
write_ref root = tm_.new_block(validator_);
leaf_node n = to_node<ValueTraits>(root);
n.set_type(btree_detail::LEAF);
n.set_nr_entries(0);
n.set_max_entries();
n.set_value_size(sizeof(typename ValueTraits::disk_type));
if (Levels > 1) {
internal_node n = to_node<block_traits>(root);
n.set_type(btree_detail::LEAF);
n.set_nr_entries(0);
n.set_max_entries();
n.set_value_size(sizeof(typename block_traits::disk_type));
} else {
leaf_node n = to_node<ValueTraits>(root);
n.set_type(btree_detail::LEAF);
n.set_nr_entries(0);
n.set_max_entries();
n.set_value_size(sizeof(typename ValueTraits::disk_type));
}
root_ = root.get_location();
}
template <unsigned Levels, typename ValueTraits>
btree<Levels, ValueTraits>::
btree(typename transaction_manager::ptr tm,
btree(transaction_manager &tm,
block_address root,
typename ValueTraits::ref_counter rc)
: tm_(tm),
destroy_(false),
root_(root),
internal_rc_(tm->get_sm()),
internal_rc_(tm.get_sm()),
rc_(rc),
validator_(new btree_node_validator)
{
@ -519,7 +559,7 @@ namespace persistent_data {
typename btree<Levels, ValueTraits>::ptr
btree<Levels, ValueTraits>::clone() const
{
tm_->get_sm()->inc(root_);
tm_.get_sm()->inc(root_);
return ptr(new btree<Levels, ValueTraits>(tm_, root_, rc_));
}
@ -595,13 +635,13 @@ namespace persistent_data {
node_type type;
unsigned nr_left, nr_right;
write_ref left = tm_->new_block(validator_);
write_ref left = tm_.new_block(validator_);
node_ref<ValueTraits> l = to_node<ValueTraits>(left);
l.set_nr_entries(0);
l.set_max_entries();
l.set_value_size(sizeof(typename ValueTraits::disk_type));
write_ref right = tm_->new_block(validator_);
write_ref right = tm_.new_block(validator_);
node_ref<ValueTraits> r = to_node<ValueTraits>(right);
r.set_nr_entries(0);
r.set_max_entries();
@ -655,7 +695,7 @@ namespace persistent_data {
node_ref<ValueTraits> l = spine.template get_node<ValueTraits>();
block_address left = spine.get_block();
write_ref right = tm_->new_block(validator_);
write_ref right = tm_.new_block(validator_);
node_ref<ValueTraits> r = to_node<ValueTraits>(right);
unsigned nr_left = l.get_nr_entries() / 2;
@ -782,12 +822,15 @@ namespace persistent_data {
{
using namespace btree_detail;
read_ref blk = tm_->read_lock(b, validator_);
read_ref blk = tm_.read_lock(b, validator_);
internal_node o = to_node<block_traits>(blk);
// FIXME: use a switch statement
if (o.get_type() == INTERNAL) {
if (v.visit_internal(loc, o))
if (v.visit_internal(loc, o)) {
for (unsigned i = 0; i < o.get_nr_entries(); i++)
tm_.prefetch(o.value_at(i));
for (unsigned i = 0; i < o.get_nr_entries(); i++) {
node_location loc2(loc);
@ -796,6 +839,7 @@ namespace persistent_data {
walk_tree(v, loc2, o.value_at(i));
}
}
} else if (loc.path.size() < Levels - 1) {
if (v.visit_internal_leaf(loc, o))


@ -85,23 +85,31 @@ namespace persistent_data {
// different sub tree (by looking at the btree_path).
class path_tracker {
public:
path_tracker() {
// We push an empty path, to ensure there
// is always a current_path.
paths_.push_back(btree_path());
}
// returns the old path if the tree has changed.
boost::optional<btree_path> next_path(btree_path const &p) {
if (p != path_) {
btree_path tmp(path_);
path_ = p;
return boost::optional<btree_path>(tmp);
btree_path const *next_path(btree_path const &p) {
if (p != current_path()) {
if (paths_.size() == 2)
paths_.pop_front();
paths_.push_back(p);
return &paths_.front();
}
return boost::optional<btree_path>();
return NULL;
}
btree_path const &current_path() const {
return path_;
return paths_.back();
}
private:
btree_path path_;
std::list<btree_path> paths_;
};
//----------------------------------------------------------------
@ -189,11 +197,12 @@ namespace persistent_data {
private:
void visit_values(btree_path const &path,
node_ref<ValueTraits> const &n) {
btree_path p2(path);
unsigned nr = n.get_nr_entries();
for (unsigned i = 0; i < nr; i++) {
btree_path p2(path);
p2.push_back(n.key_at(i));
value_visitor_.visit(p2, n.value_at(i));
p2.pop_back();
}
}
@ -427,7 +436,7 @@ namespace persistent_data {
}
void update_path(btree_path const &path) {
boost::optional<btree_path> old_path = path_tracker_.next_path(path);
btree_path const *old_path = path_tracker_.next_path(path);
if (old_path)
// we need to emit any errors that
// were accrued against the old


@ -0,0 +1,38 @@
#ifndef PERSISTENT_DATA_DATA_STRUCTURES_SIMPLE_TRAITS_H
#define PERSISTENT_DATA_DATA_STRUCTURES_SIMPLE_TRAITS_H
//----------------------------------------------------------------
namespace persistent_data {
struct uint64_traits {
typedef base::le64 disk_type;
typedef uint64_t value_type;
typedef no_op_ref_counter<uint64_t> ref_counter;
static void unpack(disk_type const &disk, value_type &value) {
value = base::to_cpu<uint64_t>(disk);
}
static void pack(value_type const &value, disk_type &disk) {
disk = base::to_disk<base::le64>(value);
}
};
struct uint32_traits {
typedef base::le32 disk_type;
typedef uint32_t value_type;
typedef no_op_ref_counter<uint32_t> ref_counter;
static void unpack(disk_type const &disk, value_type &value) {
value = base::to_cpu<uint32_t>(disk);
}
static void pack(value_type const &value, disk_type &disk) {
disk = base::to_disk<base::le32>(value);
}
};
}
//----------------------------------------------------------------
#endif
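
These traits are the marshalling glue the btree and array templates rely on: pack() turns a cpu value into its little-endian disk form and unpack() reverses it. A trivial round-trip sketch; traits_round_trip() is illustrative.

// Round-trip sketch for uint64_traits.
#include "base/endian_utils.h"

void traits_round_trip()
{
	uint64_t in = 0x0123456789abcdefULL;

	persistent_data::uint64_traits::disk_type d;
	persistent_data::uint64_traits::pack(in, d);	// cpu -> le64

	uint64_t out = 0;
	persistent_data::uint64_traits::unpack(d, out);	// le64 -> cpu
	// out == in regardless of host endianness
}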


@ -1,64 +0,0 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#include "endian_utils.h"
using namespace base;
//----------------------------------------------------------------
bool
base::test_bit_le(void const *bits, unsigned b)
{
le64 const *w = reinterpret_cast<le64 const *>(bits);
w += b / 64;
uint64_t v = to_cpu<uint64_t>(*w);
uint64_t mask = 1;
mask = mask << (b % 64);
return (v & mask) ? true : false;
}
void
base::set_bit_le(void *bits, unsigned b)
{
le64 *w = reinterpret_cast<le64 *>(bits);
w += b / 64;
uint64_t v = to_cpu<uint64_t>(*w);
uint64_t mask = 1;
mask = mask << (b % 64);
v |= mask;
*w = to_disk<le64>(v);
}
void
base::clear_bit_le(void *bits, unsigned b)
{
le64 *w = reinterpret_cast<le64 *>(bits);
w += b / 64;
uint64_t v = to_cpu<uint64_t>(*w);
uint64_t mask = 1;
mask = mask << (b % 64);
mask = ~mask;
v &= mask;
*w = to_disk<le64>(v);
}
//----------------------------------------------------------------


@ -1,110 +0,0 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef ENDIAN_H
#define ENDIAN_H
#include <endian.h>
#include <stdint.h>
#include <boost/static_assert.hpp>
//----------------------------------------------------------------
// FIXME: rename to endian
namespace base {
// These are just little wrapper types to make the compiler
// understand that the le types are not assignable to the
// corresponding cpu type.
struct le16 {
explicit le16(uint16_t v = 0)
: v_(v) {
}
uint16_t v_;
} __attribute__((packed));
struct le32 {
explicit le32(uint32_t v = 0)
: v_(v) {
}
uint32_t v_;
} __attribute__((packed));
struct le64 {
explicit le64(uint64_t v = 0)
: v_(v) {
}
uint64_t v_;
} __attribute__((packed));
//--------------------------------
// FIXME: actually do the conversions !
template <typename CPUType, typename DiskType>
CPUType to_cpu(DiskType const &d) {
BOOST_STATIC_ASSERT(sizeof(d) == 0);
}
template <typename DiskType, typename CPUType>
DiskType to_disk(CPUType const &v) {
BOOST_STATIC_ASSERT(sizeof(v) == 0);
}
template <>
inline uint16_t to_cpu<uint16_t, le16>(le16 const &d) {
return le16toh(d.v_);
}
template <>
inline le16 to_disk<le16, uint16_t>(uint16_t const &v) {
return le16(htole16(v));
}
template <>
inline uint32_t to_cpu<uint32_t, le32>(le32 const &d) {
return le32toh(d.v_);
}
template <>
inline le32 to_disk<le32, uint32_t>(uint32_t const &v) {
return le32(htole32(v));
}
template <>
inline uint64_t to_cpu<uint64_t, le64>(le64 const &d) {
return le64toh(d.v_);
}
template <>
inline le64 to_disk<le64, uint64_t>(uint64_t const &v) {
return le64(htole64(v));
}
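// Illustrative round trip using the specialisations above:
//
//	le32 on_disk = to_disk<le32>(uint32_t(1234));
//	uint32_t in_core = to_cpu<uint32_t>(on_disk);	// in_core == 1234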
//--------------------------------
bool test_bit_le(void const *bits, unsigned b);
void set_bit_le(void *bits, unsigned b);
void clear_bit_le(void *bits, unsigned b);
}
//----------------------------------------------------------------
#endif

View File

@ -48,7 +48,7 @@ persistent_data::get_nr_blocks(string const &path)
}
persistent_data::block_manager<>::ptr
persistent_data::open_bm(std::string const &dev_path, block_io<>::mode m)
persistent_data::open_bm(std::string const &dev_path, block_manager<>::mode m)
{
block_address nr_blocks = get_nr_blocks(dev_path);
return block_manager<>::ptr(new block_manager<>(dev_path, nr_blocks, 1, m));

View File

@ -10,7 +10,7 @@
// FIXME: move to a different unit
namespace persistent_data {
persistent_data::block_address get_nr_blocks(string const &path);
block_manager<>::ptr open_bm(std::string const &dev_path, block_io<>::mode m);
block_manager<>::ptr open_bm(std::string const &dev_path, block_manager<>::mode m);
void check_file_exists(std::string const &file);
}

View File

@ -1,127 +0,0 @@
// Copyright (C) 2012 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#include "lock_tracker.h"
#include <stdexcept>
using namespace persistent_data;
using namespace std;
//----------------------------------------------------------------
lock_tracker::lock_tracker(uint64_t low, uint64_t high)
: low_(low),
high_(high)
{
}
void
lock_tracker::read_lock(uint64_t key)
{
check_key(key);
LockMap::iterator it = locks_.find(key);
if (found(it)) {
if (it->second < 0)
throw runtime_error("already write locked");
it->second++;
} else
locks_.insert(make_pair(key, 1));
}
void
lock_tracker::write_lock(uint64_t key)
{
check_key(key);
LockMap::const_iterator it = locks_.find(key);
if (found(it))
throw runtime_error("already locked");
locks_.insert(make_pair(key, -1));
}
void
lock_tracker::superblock_lock(uint64_t key)
{
if (superblock_)
throw runtime_error("superblock already held");
superblock_ = boost::optional<uint64_t>(key);
try {
write_lock(key);
} catch (...) {
superblock_ = boost::optional<uint64_t>();
throw;	// roll back the superblock marker, then let the caller see the failure
}
}
void
lock_tracker::unlock(uint64_t key)
{
check_key(key);
LockMap::const_iterator it = locks_.find(key);
if (!found(it))
throw runtime_error("not locked");
if (superblock_ && *superblock_ == key) {
if (locks_.size() > 1)
throw runtime_error("superblock unlocked while other locks still held");
superblock_ = boost::optional<uint64_t>();
}
if (it->second > 1)
locks_[key] = it->second - 1;	// map::insert() wouldn't overwrite the existing count
else
locks_.erase(key);
}
bool
lock_tracker::found(LockMap::const_iterator it) const
{
return it != locks_.end();
}
bool
lock_tracker::valid_key(uint64_t key) const
{
return (key >= low_ && key <= high_);
}
void
lock_tracker::check_key(uint64_t key) const
{
if (!valid_key(key))
throw runtime_error("invalid key");
}
bool
lock_tracker::is_locked(uint64_t key) const
{
check_key(key);
return found(locks_.find(key));
}
//----------------------------------------------------------------

View File

@ -1,61 +0,0 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef LOCK_TRACKER_H
#define LOCK_TRACKER_H
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <map>
#include <stdint.h>
//----------------------------------------------------------------
namespace persistent_data {
class lock_tracker : private boost::noncopyable {
public:
lock_tracker(uint64_t low, uint64_t high);
void read_lock(uint64_t key);
void write_lock(uint64_t key);
void superblock_lock(uint64_t key);
void unlock(uint64_t key);
bool is_locked(uint64_t key) const;
private:
typedef std::map<uint64_t, int> LockMap;
bool found(LockMap::const_iterator it) const;
bool valid_key(uint64_t key) const;
void check_key(uint64_t key) const;
// Positive for read lock, negative for write lock
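// e.g. a value of 3 means three concurrent read locks; -1 means a
// single write lock.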
LockMap locks_;
boost::optional<uint64_t> superblock_;
uint64_t low_;
uint64_t high_;
};
}
//----------------------------------------------------------------
#endif

View File

@ -99,6 +99,8 @@ namespace base {
replacement.insert(run<T>());
else {
typename rset::const_iterator b = runs_.begin();
// Some versions of gcc give a spurious warning here.
maybe last = b->end_;
if (b->begin_)

View File

@ -16,6 +16,8 @@
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#include "base/endian_utils.h"
#include "persistent-data/space-maps/disk.h"
#include "persistent-data/space-maps/disk_structures.h"
#include "persistent-data/space-maps/recursive.h"
@ -24,7 +26,6 @@
#include "persistent-data/data-structures/btree_damage_visitor.h"
#include "persistent-data/data-structures/btree_counter.h"
#include "persistent-data/checksum.h"
#include "persistent-data/endian_utils.h"
#include "persistent-data/math_utils.h"
#include "persistent-data/transaction_manager.h"
@ -37,9 +38,9 @@ using namespace sm_disk_detail;
namespace {
uint64_t const BITMAP_CSUM_XOR = 240779;
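// The validators guard each 4k metadata block: check() recomputes the
// crc32c (salted with the XOR constant above) and verifies the stored
// block number matches the location the block was read from; prepare()
// fills both fields in just before the block is written.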
struct bitmap_block_validator : public block_manager<>::validator {
virtual void check(buffer<> const &b, block_address location) const {
bitmap_header const *data = reinterpret_cast<bitmap_header const *>(&b);
struct bitmap_block_validator : public bcache::validator {
virtual void check(void const *raw, block_address location) const {
bitmap_header const *data = reinterpret_cast<bitmap_header const *>(raw);
crc32c sum(BITMAP_CSUM_XOR);
sum.append(&data->not_used, MD_BLOCK_SIZE - sizeof(uint32_t));
if (sum.get_sum() != to_cpu<uint32_t>(data->csum))
@ -49,8 +50,8 @@ namespace {
throw checksum_error("bad block nr in space map bitmap");
}
virtual void prepare(buffer<> &b, block_address location) const {
bitmap_header *data = reinterpret_cast<bitmap_header *>(&b);
virtual void prepare(void *raw, block_address location) const {
bitmap_header *data = reinterpret_cast<bitmap_header *>(raw);
data->blocknr = to_disk<base::le64, uint64_t>(location);
crc32c sum(BITMAP_CSUM_XOR);
@ -64,9 +65,9 @@ namespace {
uint64_t const INDEX_CSUM_XOR = 160478;
// FIXME: factor out the common code in these validators
struct index_block_validator : public block_manager<>::validator {
virtual void check(buffer<> const &b, block_address location) const {
metadata_index const *mi = reinterpret_cast<metadata_index const *>(&b);
struct index_block_validator : public bcache::validator {
virtual void check(void const *raw, block_address location) const {
metadata_index const *mi = reinterpret_cast<metadata_index const *>(raw);
crc32c sum(INDEX_CSUM_XOR);
sum.append(&mi->padding_, MD_BLOCK_SIZE - sizeof(uint32_t));
if (sum.get_sum() != to_cpu<uint32_t>(mi->csum_))
@ -76,8 +77,8 @@ namespace {
throw checksum_error("bad block nr in metadata index block");
}
virtual void prepare(buffer<> &b, block_address location) const {
metadata_index *mi = reinterpret_cast<metadata_index *>(&b);
virtual void prepare(void *raw, block_address location) const {
metadata_index *mi = reinterpret_cast<metadata_index *>(raw);
mi->blocknr_ = to_disk<base::le64, uint64_t>(location);
crc32c sum(INDEX_CSUM_XOR);
@ -86,9 +87,9 @@ namespace {
}
};
block_manager<>::validator::ptr
bcache::validator::ptr
index_validator() {
return block_manager<>::validator::ptr(new index_block_validator());
return bcache::validator::ptr(new index_block_validator());
}
//--------------------------------
@ -98,26 +99,26 @@ namespace {
typedef transaction_manager::read_ref read_ref;
typedef transaction_manager::write_ref write_ref;
bitmap(transaction_manager::ptr tm,
bitmap(transaction_manager &tm,
index_entry const &ie,
block_manager<>::validator::ptr v)
bcache::validator::ptr v)
: tm_(tm),
validator_(v),
ie_(ie) {
}
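// Each block gets two bits in the bitmap: b1 (bit 2b) is the high bit
// and b2 (bit 2b + 1) the low bit of a small reference count.  Counts
// too big for two bits appear to spill into the separate ref_counts_
// btree held by sm_disk below.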
ref_t lookup(unsigned b) const {
read_ref rr = tm_->read_lock(ie_.blocknr_, validator_);
read_ref rr = tm_.read_lock(ie_.blocknr_, validator_);
void const *bits = bitmap_data(rr);
ref_t b1 = test_bit_le(bits, b * 2);
ref_t b2 = test_bit_le(bits, b * 2 + 1);
ref_t result = b2 ? 1 : 0;
result |= b1 ? 0b10 : 0;
result |= b1 ? 2 : 0;
return result;
}
void insert(unsigned b, ref_t n) {
write_ref wr = tm_->shadow(ie_.blocknr_, validator_).first;
write_ref wr = tm_.shadow(ie_.blocknr_, validator_).first;
void *bits = bitmap_data(wr);
bool was_free = !test_bit_le(bits, b * 2) && !test_bit_le(bits, b * 2 + 1);
if (n == 1 || n == 3)
@ -158,31 +159,31 @@ namespace {
}
void iterate(block_address offset, block_address hi, space_map::iterator &it) const {
read_ref rr = tm_->read_lock(ie_.blocknr_, validator_);
read_ref rr = tm_.read_lock(ie_.blocknr_, validator_);
void const *bits = bitmap_data(rr);
for (unsigned b = 0; b < hi; b++) {
ref_t b1 = test_bit_le(bits, b * 2);
ref_t b2 = test_bit_le(bits, b * 2 + 1);
ref_t result = b2 ? 1 : 0;
result |= b1 ? 0b10 : 0;
result |= b1 ? 2 : 0;
it(offset + b, result);
}
}
private:
void *bitmap_data(transaction_manager::write_ref &wr) {
bitmap_header *h = reinterpret_cast<bitmap_header *>(&wr.data()[0]);
bitmap_header *h = reinterpret_cast<bitmap_header *>(wr.data());
return h + 1;
}
void const *bitmap_data(transaction_manager::read_ref &rr) const {
bitmap_header const *h = reinterpret_cast<bitmap_header const *>(&rr.data()[0]);
bitmap_header const *h = reinterpret_cast<bitmap_header const *>(rr.data());
return h + 1;
}
transaction_manager::ptr tm_;
block_manager<>::validator::ptr validator_;
transaction_manager &tm_;
bcache::validator::ptr validator_;
index_entry ie_;
};
@ -242,7 +243,7 @@ namespace {
typedef transaction_manager::write_ref write_ref;
sm_disk(index_store::ptr indexes,
transaction_manager::ptr tm)
transaction_manager &tm)
: tm_(tm),
bitmap_validator_(new bitmap_block_validator),
indexes_(indexes),
@ -252,7 +253,7 @@ namespace {
}
sm_disk(index_store::ptr indexes,
transaction_manager::ptr tm,
transaction_manager &tm,
sm_root const &root)
: tm_(tm),
bitmap_validator_(new bitmap_block_validator),
@ -355,7 +356,7 @@ namespace {
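// Extending the space map: allocate and initialise a fresh bitmap
// block for every new index entry.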
indexes_->resize(bitmap_count);
for (block_address i = old_bitmap_count; i < bitmap_count; i++) {
write_ref wr = tm_->new_block(bitmap_validator_);
write_ref wr = tm_.new_block(bitmap_validator_);
index_entry ie;
ie.blocknr_ = wr.get_location();
@ -445,7 +446,7 @@ namespace {
}
protected:
transaction_manager::ptr get_tm() const {
transaction_manager &get_tm() const {
return tm_;
}
@ -509,8 +510,8 @@ namespace {
ref_counts_.remove(key);
}
transaction_manager::ptr tm_;
block_manager<>::validator::ptr bitmap_validator_;
transaction_manager &tm_;
bcache::validator::ptr bitmap_validator_;
index_store::ptr indexes_;
block_address nr_blocks_;
block_address nr_allocated_;
@ -552,12 +553,12 @@ namespace {
public:
typedef boost::shared_ptr<btree_index_store> ptr;
btree_index_store(transaction_manager::ptr tm)
btree_index_store(transaction_manager &tm)
: tm_(tm),
bitmaps_(tm, index_entry_traits::ref_counter()) {
}
btree_index_store(transaction_manager::ptr tm,
btree_index_store(transaction_manager &tm,
block_address root)
: tm_(tm),
bitmaps_(tm, root, index_entry_traits::ref_counter()) {
@ -623,7 +624,7 @@ namespace {
}
private:
transaction_manager::ptr tm_;
transaction_manager &tm_;
btree<1, index_entry_traits> bitmaps_;
};
@ -631,13 +632,13 @@ namespace {
public:
typedef boost::shared_ptr<metadata_index_store> ptr;
metadata_index_store(transaction_manager::ptr tm)
metadata_index_store(transaction_manager &tm)
: tm_(tm) {
block_manager<>::write_ref wr = tm_->new_block(index_validator());
block_manager<>::write_ref wr = tm_.new_block(index_validator());
bitmap_root_ = wr.get_location();
}
metadata_index_store(transaction_manager::ptr tm, block_address root, block_address nr_indexes)
metadata_index_store(transaction_manager &tm, block_address root, block_address nr_indexes)
: tm_(tm),
bitmap_root_(root) {
resize(nr_indexes);
@ -667,10 +668,10 @@ namespace {
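// Shadow the on-disk index block and pack the cached index entries
// back into it; the shadow's location becomes the new bitmap root.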
virtual void commit_ies() {
std::pair<block_manager<>::write_ref, bool> p =
tm_->shadow(bitmap_root_, index_validator());
tm_.shadow(bitmap_root_, index_validator());
bitmap_root_ = p.first.get_location();
metadata_index *mdi = reinterpret_cast<metadata_index *>(&p.first.data());
metadata_index *mdi = reinterpret_cast<metadata_index *>(p.first.data());
for (unsigned i = 0; i < entries_.size(); i++)
index_entry_traits::pack(entries_[i], mdi->index[i]);
@ -701,14 +702,14 @@ namespace {
private:
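// Read the index block and unpack an index_entry for each bitmap.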
void load_ies() {
block_manager<>::read_ref rr =
tm_->read_lock(bitmap_root_, index_validator());
tm_.read_lock(bitmap_root_, index_validator());
metadata_index const *mdi = reinterpret_cast<metadata_index const *>(&rr.data());
metadata_index const *mdi = reinterpret_cast<metadata_index const *>(rr.data());
for (unsigned i = 0; i < entries_.size(); i++)
index_entry_traits::unpack(*(mdi->index + i), entries_[i]);
}
transaction_manager::ptr tm_;
transaction_manager &tm_;
block_address bitmap_root_;
std::vector<index_entry> entries_;
};
@ -717,7 +718,7 @@ namespace {
//----------------------------------------------------------------
checked_space_map::ptr
persistent_data::create_disk_sm(transaction_manager::ptr tm,
persistent_data::create_disk_sm(transaction_manager &tm,
block_address nr_blocks)
{
index_store::ptr store(new btree_index_store(tm));
@ -728,7 +729,7 @@ persistent_data::create_disk_sm(transaction_manager::ptr tm,
}
checked_space_map::ptr
persistent_data::open_disk_sm(transaction_manager::ptr tm, void *root)
persistent_data::open_disk_sm(transaction_manager &tm, void *root)
{
sm_root_disk d;
sm_root v;
@ -740,7 +741,7 @@ persistent_data::open_disk_sm(transaction_manager::ptr tm, void *root)
}
checked_space_map::ptr
persistent_data::create_metadata_sm(transaction_manager::ptr tm, block_address nr_blocks)
persistent_data::create_metadata_sm(transaction_manager &tm, block_address nr_blocks)
{
index_store::ptr store(new metadata_index_store(tm));
checked_space_map::ptr sm(new sm_disk(store, tm));
@ -751,7 +752,7 @@ persistent_data::create_metadata_sm(transaction_manager::ptr tm, block_address n
}
checked_space_map::ptr
persistent_data::open_metadata_sm(transaction_manager::ptr tm, void *root)
persistent_data::open_metadata_sm(transaction_manager &tm, void *root)
{
sm_root_disk d;
sm_root v;

View File

@ -26,16 +26,16 @@
namespace persistent_data {
checked_space_map::ptr
create_disk_sm(transaction_manager::ptr tm, block_address nr_blocks);
create_disk_sm(transaction_manager &tm, block_address nr_blocks);
checked_space_map::ptr
open_disk_sm(transaction_manager::ptr tm, void *root);
open_disk_sm(transaction_manager &tm, void *root);
checked_space_map::ptr
create_metadata_sm(transaction_manager::ptr tm, block_address nr_blocks);
create_metadata_sm(transaction_manager &tm, block_address nr_blocks);
checked_space_map::ptr
open_metadata_sm(transaction_manager::ptr tm, void *root);
open_metadata_sm(transaction_manager &tm, void *root);
}
//----------------------------------------------------------------

View File

@ -19,7 +19,7 @@
#ifndef SPACE_MAP_DISK_STRUCTURES_H
#define SPACE_MAP_DISK_STRUCTURES_H
#include "persistent-data/endian_utils.h"
#include "base/endian_utils.h"
// FIXME: what's this included for?
#include "persistent-data/data-structures/btree.h"

View File

@ -19,6 +19,8 @@
#include "persistent-data/space-maps/recursive.h"
#include "persistent-data/space-maps/subtracting_span_iterator.h"
#include <list>
using namespace persistent_data;
//----------------------------------------------------------------
@ -290,7 +292,7 @@ namespace {
BOP_SET
};
typedef map<block_address, list<block_op> > op_map;
typedef map<block_address, std::list<block_op> > op_map;
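// Per-block ref count operations queued while the wrapped space map is
// being used recursively; presumably replayed once recursion unwinds.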
op_map ops_;
subtracting_span_iterator::block_set allocated_blocks_;

View File

@ -119,6 +119,7 @@ namespace persistent_data {
namespace space_map_detail {
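// Base type for damage found while checking a space map; the virtual
// destructor lets concrete damage objects be deleted through this
// interface.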
class damage {
public:
virtual ~damage() {}
};

View File

@ -72,7 +72,7 @@ transaction_manager::shadow(block_address orig, validator v)
throw runtime_error("transaction_manager::shadow() couldn't allocate new block");
write_ref dest = bm_->write_lock_zero(*mb, v);
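// Copy-on-write: copy the original block's contents into the freshly
// locked block, drop a reference on the original, and record the new
// location as a shadow belonging to this transaction.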
::memcpy(dest.data().raw(), src.data().raw(), MD_BLOCK_SIZE); // FIXME: use buffer copy method
::memcpy(dest.data(), src.data(), MD_BLOCK_SIZE);
sm_->dec(orig);
add_shadow(dest.get_location());

View File

@ -33,7 +33,7 @@ namespace persistent_data {
typedef boost::shared_ptr<transaction_manager> ptr;
typedef block_manager<>::read_ref read_ref;
typedef block_manager<>::write_ref write_ref;
typedef block_manager<>::validator::ptr validator;
typedef bcache::validator::ptr validator;
// If the space map is persistent, then the caller should
// hold onto a reference and remember to call sm_->commit()
@ -66,6 +66,10 @@ namespace persistent_data {
return bm_;
}
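// Pass read-ahead hints straight through to the block manager so the
// cache can start fetching the block early.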
void prefetch(block_address b) {
bm_->prefetch(b);
}
private:
void add_shadow(block_address b);
void remove_shadow(block_address b);