2014-07-22 21:13:44 +05:30
|
|
|
#include "block-cache/block_cache.h"
|
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
#include <algorithm>
|
2014-07-22 21:13:44 +05:30
|
|
|
#include <assert.h>
|
|
|
|
#include <libaio.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <pthread.h>
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
2014-07-25 15:05:04 +05:30
|
|
|
#include <iostream>
|
2014-07-25 20:44:24 +05:30
|
|
|
#include <stdexcept>
|
2014-07-28 18:43:28 +05:30
|
|
|
#include <sstream>
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
using namespace bcache;
|
|
|
|
|
2014-07-25 15:05:04 +05:30
|
|
|
//----------------------------------------------------------------
|
|
|
|
|
2014-07-22 21:13:44 +05:30
|
|
|
// FIXME: get from linux headers
|
|
|
|
#define SECTOR_SHIFT 9
|
|
|
|
#define PAGE_SIZE 4096
|
|
|
|
|
|
|
|
#define MIN_BLOCKS 16
|
|
|
|
#define WRITEBACK_LOW_THRESHOLD_PERCENT 33
|
|
|
|
#define WRITEBACK_HIGH_THRESHOLD_PERCENT 66
|
|
|
|
|
2014-07-25 15:05:04 +05:30
|
|
|
//----------------------------------------------------------------
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-25 15:05:04 +05:30
|
|
|
namespace {
	// Allocate |len| bytes aligned to |alignment| (a power of two and a
	// multiple of sizeof(void *), per posix_memalign).  Returns NULL on
	// failure; the caller releases the memory with free().
	void *alloc_aligned(size_t len, size_t alignment)
	{
		void *mem = NULL;

		if (posix_memalign(&mem, alignment, len) != 0)
			return NULL;

		return mem;
	}
}
|
|
|
|
|
2014-07-25 15:05:04 +05:30
|
|
|
//----------------------------------------------------------------
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
int
|
|
|
|
block_cache::init_free_list(unsigned count)
|
|
|
|
{
|
|
|
|
size_t block_size = block_size_ << SECTOR_SHIFT;
|
2016-02-04 14:27:41 +05:30
|
|
|
unsigned char *data = static_cast<unsigned char *>(alloc_aligned(count * block_size, PAGE_SIZE));
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/* Allocate the data for each block. We page align the data. */
|
2016-02-04 14:27:41 +05:30
|
|
|
if (!data)
|
2014-07-30 16:57:33 +05:30
|
|
|
return -ENOMEM;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
blocks_data_ = data;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
for (unsigned i = 0; i < count; i++) {
|
|
|
|
block &b = (*blocks_memory_)[i];
|
|
|
|
b.data_ = data + (block_size * i);
|
|
|
|
free_.push_front(b);
|
2014-07-25 15:05:04 +05:30
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
return 0;
|
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-08-21 17:24:39 +05:30
|
|
|
void
|
|
|
|
block_cache::exit_free_list()
|
|
|
|
{
|
|
|
|
if (blocks_data_)
|
|
|
|
free(blocks_data_);
|
|
|
|
}
|
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Take a block from the free list, or return NULL if none remain.
// The caller is responsible for (re)initialising the block's fields.
block_cache::block *
block_cache::__alloc_block()
{
	block *result = NULL;

	if (!free_.empty()) {
		result = &free_.front();
		result->unlink();
	}

	return result;
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/*----------------------------------------------------------------
|
|
|
|
* Low level IO handling
|
|
|
|
*
|
|
|
|
* We cannot have two concurrent writes on the same block.
|
|
|
|
* eg, background writeback, put with dirty, flush?
|
|
|
|
*
|
|
|
|
* To avoid this we introduce some restrictions:
|
|
|
|
*
|
|
|
|
* i) A held block can never be written back.
|
|
|
|
* ii) You cannot get a block until writeback has completed.
|
|
|
|
*
|
|
|
|
*--------------------------------------------------------------*/
|
|
|
|
|
|
|
|
/*
 * This can be called from the context of the aio thread.  So we have a
 * separate 'top half' complete function that we know is only called by the
 * main cache thread.
 */
void
block_cache::complete_io(block &b, int result)
{
	// Record the outcome and take the block out of the in-flight state.
	b.error_ = result;
	b.clear_flags(BF_IO_PENDING);
	nr_io_pending_--;

	if (b.error_) {
		// Failed io: move the block to the errored list; flush()
		// uses that list to report -EIO.
		b.unlink();
		errored_.push_back(b);

	} else {
		// A successful write cleans the block, so drop the dirty
		// bookkeeping before filing it on the clean list.
		if (b.test_flags(BF_DIRTY)) {
			b.clear_flags(BF_DIRTY | BF_PREVIOUSLY_DIRTY);
			nr_dirty_--;
		}

		b.unlink();
		clean_.push_back(b);
	}
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/*
 * |b->list| should be valid (either pointing to itself, on one of the other
 * lists.
 *
 * Marks the block as in flight, moves it to the io_pending_ list and
 * submits a single aio request for it.  On submission failure the block is
 * completed with EIO (moving it to the errored list) and a runtime_error
 * describing the failure is thrown.
 */
// FIXME: add batch issue
void
block_cache::issue_low_level(block &b, enum io_iocb_cmd opcode, const char *desc)
{
	int r;
	iocb *control_blocks[1];

	// A block must never have two ios in flight at once.
	assert(!b.test_flags(BF_IO_PENDING));
	b.set_flags(BF_IO_PENDING);
	nr_io_pending_++;
	b.unlink();
	io_pending_.push_back(b);

	// The rest of the control block was filled in by setup_control_block().
	b.control_block_.aio_lio_opcode = opcode;
	control_blocks[0] = &b.control_block_;
	r = io_submit(aio_context_, 1, control_blocks);
	if (r != 1) {
		// Nothing was queued; retire the block immediately so the
		// pending counters stay consistent, then report the error.
		complete_io(b, EIO);

		std::ostringstream out;
		out << "couldn't issue " << desc << " io for block " << b.index_;

		if (r < 0)
			out << ": io_submit failed with " << r;
		else
			out << ": io_submit succeeded, but queued no io";

		throw std::runtime_error(out.str());
	}
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Queue an asynchronous read of |b|'s data from disk.  The block must not
// already have io in flight.
void
block_cache::issue_read(block &b)
{
	assert(!b.test_flags(BF_IO_PENDING));
	issue_low_level(b, IO_CMD_PREAD, "read");
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Queue an asynchronous write of |b|'s data to disk.  The block's
// validator gets a chance to fix up the data (eg, checksums) before the
// io is issued.  The block must not already have io in flight.
void
block_cache::issue_write(block &b)
{
	assert(!b.test_flags(BF_IO_PENDING));
	b.v_->prepare(b.data_, b.index_);
	issue_low_level(b, IO_CMD_PWRITE, "write");
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Block until at least one outstanding aio completes, then retire every
// completed event via complete_io().  Throws on io_getevents failure or on
// a short (partial) transfer.
void
block_cache::wait_io()
{
	int r;
	unsigned i;

	// FIXME: use a timeout to prevent hanging
	// NOTE(review): with min_nr = 1 this blocks forever if no io is
	// actually in flight — callers must ensure io_pending_ is non-empty.
	r = io_getevents(aio_context_, 1, nr_cache_blocks_, &events_[0], NULL);
	if (r < 0) {
		std::ostringstream out;
		out << "io_getevents failed: " << r;
		throw std::runtime_error(out.str());
	}

	for (i = 0; i < static_cast<unsigned>(r); i++) {
		io_event const &e = events_[i];
		// Recover the owning block from the iocb embedded in it.
		block *b = base::container_of(e.obj, &block::control_block_);

		// Full transfer: success.
		if (e.res == block_size_ << SECTOR_SHIFT)
			complete_io(*b, 0);

		// NOTE(review): libaio declares io_event::res as unsigned, so
		// this comparison may never be true on some versions — verify
		// against the libaio headers in use; a cast may be needed to
		// catch negative (errno-style) results.
		else if (e.res < 0)
			complete_io(*b, e.res);

		// Short transfer: treat as fatal.
		else {
			std::ostringstream out;
			out << "incomplete io for block " << b->index_
			    << ", e.res = " << e.res
			    << ", e.res2 = " << e.res2
			    << ", offset = " << b->control_block_.u.c.offset
			    << ", nbytes = " << b->control_block_.u.c.nbytes;
			throw std::runtime_error(out.str());
		}
	}
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/*----------------------------------------------------------------
|
|
|
|
* Clean/dirty list management
|
|
|
|
*--------------------------------------------------------------*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We're using lru lists atm, but I think it would be worth
|
|
|
|
* experimenting with a multiqueue approach.
|
|
|
|
*/
|
2016-02-04 14:27:41 +05:30
|
|
|
// Pick the lru list a block belongs on, judged purely from its current
// state: errored blocks go to errored_, dirty ones to dirty_, the rest
// to clean_.
block_cache::block_list &
block_cache::__categorise(block &b)
{
	if (b.error_)
		return errored_;

	if (b.test_flags(BF_DIRTY))
		return dirty_;

	return clean_;
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
void
|
|
|
|
block_cache::hit(block &b)
|
|
|
|
{
|
2016-02-04 14:27:41 +05:30
|
|
|
b.unlink();
|
|
|
|
__categorise(b).push_back(b);
|
2014-07-30 16:57:33 +05:30
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/*----------------------------------------------------------------
|
|
|
|
* High level IO handling
|
|
|
|
*--------------------------------------------------------------*/
|
|
|
|
void
|
|
|
|
block_cache::wait_all()
|
|
|
|
{
|
2016-02-04 14:27:41 +05:30
|
|
|
while (!io_pending_.empty())
|
2014-07-30 16:57:33 +05:30
|
|
|
wait_io();
|
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
void
|
|
|
|
block_cache::wait_specific(block &b)
|
|
|
|
{
|
|
|
|
while (b.test_flags(BF_IO_PENDING))
|
|
|
|
wait_io();
|
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
unsigned
|
|
|
|
block_cache::writeback(unsigned count)
|
|
|
|
{
|
|
|
|
unsigned actual = 0, dirty_length = 0;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
// issue_write unlinks b, which invalidates the iteration, so we
|
|
|
|
// keep track of the next element before removing.
|
|
|
|
auto it = dirty_.begin();
|
|
|
|
auto next = it;
|
|
|
|
while (it != dirty_.end()) {
|
|
|
|
next = it;
|
|
|
|
++next;
|
2014-07-30 16:57:33 +05:30
|
|
|
dirty_length++;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
if (actual == count)
|
|
|
|
break;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// The block may be on the dirty list from a prior
|
|
|
|
// acquisition.
|
2016-02-04 14:27:41 +05:30
|
|
|
if (it->ref_count_)
|
2014-07-30 16:57:33 +05:30
|
|
|
continue;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
issue_write(*it);
|
2014-07-30 16:57:33 +05:30
|
|
|
actual++;
|
2014-07-29 18:11:45 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
it = next;
|
2014-07-29 18:11:45 +05:30
|
|
|
}
|
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
return actual;
|
2014-07-30 16:57:33 +05:30
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/*----------------------------------------------------------------
|
|
|
|
* High level allocation
|
|
|
|
*--------------------------------------------------------------*/
|
|
|
|
void
|
|
|
|
block_cache::setup_control_block(block &b)
|
|
|
|
{
|
|
|
|
iocb *cb = &b.control_block_;
|
|
|
|
size_t block_size_bytes = block_size_ << SECTOR_SHIFT;
|
|
|
|
|
|
|
|
memset(cb, 0, sizeof(*cb));
|
|
|
|
cb->aio_fildes = fd_;
|
|
|
|
|
|
|
|
cb->u.c.buf = b.data_;
|
|
|
|
cb->u.c.offset = block_size_bytes * b.index_;
|
|
|
|
cb->u.c.nbytes = block_size_bytes;
|
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
// FIXME: return a reference
|
2014-07-30 16:57:33 +05:30
|
|
|
block_cache::block *
|
|
|
|
block_cache::find_unused_clean_block()
|
|
|
|
{
|
2016-02-04 14:27:41 +05:30
|
|
|
for (block &b : clean_) {
|
|
|
|
if (b.ref_count_)
|
2014-07-30 16:57:33 +05:30
|
|
|
continue;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2016-02-04 14:32:42 +05:30
|
|
|
b.unlink_set();
|
2016-02-04 14:27:41 +05:30
|
|
|
b.unlink();
|
|
|
|
return &b;
|
2014-07-25 15:05:04 +05:30
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Produce a block bound to |index|: first try the free list, then try to
// reclaim an unused clean block (kicking off some writeback if nothing is
// clean yet).  On success the block is reset, its iocb primed and it is
// inserted into the index set.  Returns NULL if no block could be found.
block_cache::block *
block_cache::new_block(block_address index)
{
	block *b;

	b = __alloc_block();
	if (!b) {
		if (clean_.empty()) {
			// Only start new writeback if none is already in
			// flight; either way wait for some io to land so a
			// clean block may appear.
			// NOTE(review): if writeback(16) issues nothing (all
			// dirty blocks held) and no io is pending, wait_io()
			// blocks indefinitely — confirm callers can't hit this.
			if (io_pending_.empty())
				writeback(16);
			wait_io();
		}

		b = find_unused_clean_block();
	}

	if (b) {
		// Reset the recycled (or fresh) block's state.
		b->bc_ = this;
		b->ref_count_ = 0;

		b->error_ = 0;
		b->flags_ = 0;
		b->v_ = noop_validator_;

		b->index_ = index;
		setup_control_block(*b);

		block_set_.insert(*b);
	}

	return b;
}
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
/*----------------------------------------------------------------
|
|
|
|
* Block reference counting
|
|
|
|
*--------------------------------------------------------------*/
|
|
|
|
unsigned
|
|
|
|
block_cache::calc_nr_cache_blocks(size_t mem, sector_t block_size)
|
|
|
|
{
|
|
|
|
size_t space_per_block = (block_size << SECTOR_SHIFT) + sizeof(block);
|
|
|
|
unsigned r = mem / space_per_block;
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
return (r < MIN_BLOCKS) ? MIN_BLOCKS : r;
|
|
|
|
}
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
unsigned
|
|
|
|
block_cache::calc_nr_buckets(unsigned nr_blocks)
|
|
|
|
{
|
|
|
|
unsigned r = 8;
|
|
|
|
unsigned n = nr_blocks / 4;
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
if (n < 8)
|
|
|
|
n = 8;
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
while (r < n)
|
|
|
|
r <<= 1;
|
2014-07-25 15:05:04 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
return r;
|
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Construct a cache over the open file descriptor |fd|.  |block_size| is
// in 512-byte sectors, |on_disk_blocks| bounds the valid block indices and
// |mem| is the memory budget used to size the cache.  Takes ownership of
// |fd| (closed in the destructor).  Throws runtime_error if the aio
// context or the block memory can't be set up.
block_cache::block_cache(int fd, sector_t block_size, uint64_t on_disk_blocks, size_t mem)
	: nr_locked_(0),
	  nr_dirty_(0),
	  nr_io_pending_(0),
	  read_hits_(0),
	  read_misses_(0),
	  write_zeroes_(0),
	  write_hits_(0),
	  write_misses_(0),
	  prefetches_(0),
	  noop_validator_(new noop_validator())
{
	int r;
	unsigned nr_cache_blocks = calc_nr_cache_blocks(mem, block_size);

	fd_ = fd;
	block_size_ = block_size;
	nr_data_blocks_ = on_disk_blocks;
	nr_cache_blocks_ = nr_cache_blocks;

	// One completion slot per cache block is the most io_getevents can
	// ever hand back at once.
	events_.resize(nr_cache_blocks);

	aio_context_ = 0; /* needed or io_setup will fail */
	r = io_setup(nr_cache_blocks, &aio_context_);
	if (r < 0) {
		perror("io_setup failed");
		throw std::runtime_error("io_setup failed");
	}

	// block isn't copyable once linked into the intrusive lists, so the
	// vector is built once at full size and never resized.
	blocks_memory_.reset(new std::vector<block>(nr_cache_blocks));

	// NOTE(review): if this throws, the destructor never runs and the
	// aio context (and fd) leak — consider RAII wrappers for both.
	r = init_free_list(nr_cache_blocks);
	if (r)
		throw std::runtime_error("couldn't allocate blocks");
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Flush any remaining dirty blocks, wait for all io, then tear down the
// aio context, the block memory and the file descriptor.  All blocks must
// have been released (nr_locked_ == 0) before destruction.
// NOTE(review): flush()/wait_all() can throw; an exception escaping a
// destructor calls std::terminate — confirm this is acceptable here.
block_cache::~block_cache()
{
	assert(!nr_locked_);
	flush();
	wait_all();

	exit_free_list();

	if (aio_context_)
		io_destroy(aio_context_);

	::close(fd_);

#if 0
	std::cerr << "\nblock cache stats\n"
		  << "=================\n"
		  << "prefetches:\t" << prefetches_ << "\n"
		  << "read hits:\t" << read_hits_ << "\n"
		  << "read misses:\t" << read_misses_ << "\n"
		  << "write hits:\t" << write_hits_ << "\n"
		  << "write misses:\t" << write_misses_ << "\n"
		  << "write zeroes:\t" << write_zeroes_ << std::endl;
#endif
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Number of addressable blocks on the underlying device/file.
uint64_t
block_cache::get_nr_blocks() const
{
	return nr_data_blocks_;
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-08-21 16:48:05 +05:30
|
|
|
// Number of blocks currently held (ref_count_ > 0) by clients.
uint64_t
block_cache::get_nr_locked() const
{
	return nr_locked_;
}
|
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
void
|
|
|
|
block_cache::zero_block(block &b)
|
|
|
|
{
|
2014-07-31 16:48:01 +05:30
|
|
|
write_zeroes_++;
|
2014-07-30 16:57:33 +05:30
|
|
|
memset(b.data_, 0, block_size_ << SECTOR_SHIFT);
|
|
|
|
b.mark_dirty();
|
|
|
|
}
|
|
|
|
|
2014-07-31 16:48:01 +05:30
|
|
|
void
|
|
|
|
block_cache::inc_hit_counter(unsigned flags)
|
|
|
|
{
|
|
|
|
if (flags & (GF_ZERO | GF_DIRTY))
|
|
|
|
write_hits_++;
|
|
|
|
else
|
|
|
|
read_hits_++;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
block_cache::inc_miss_counter(unsigned flags)
|
|
|
|
{
|
|
|
|
if (flags & (GF_ZERO | GF_DIRTY))
|
|
|
|
write_misses_++;
|
|
|
|
else
|
|
|
|
read_misses_++;
|
|
|
|
}
|
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Find the block for |index| in the cache, or bring it in from disk.
// GF_ZERO skips the read and zeroes the data instead.  When the cached
// block was last used with a different validator, the old validator's
// prepare() is run (if dirty) and the new one's check() applied before
// swapping validators.  Returns NULL on failure or if the block is in an
// error state.
block_cache::block *
block_cache::lookup_or_read_block(block_address index, unsigned flags,
				  validator::ptr v)
{
	auto it = block_set_.find(index, cmp_index());

	if (it != block_set_.end()) {
		// Cached.  A block whose read is still in flight counts as
		// a miss, and we must wait for the data before using it.
		if (it->test_flags(BF_IO_PENDING)) {
			inc_miss_counter(flags);
			wait_specific(*it);
		} else
			inc_hit_counter(flags);

		if (flags & GF_ZERO)
			zero_block(*it);
		else {
			// Validator changed: let the old one fix up dirty
			// data, then verify with the new one.
			if (it->v_.get() != v.get()) {
				if (it->test_flags(BF_DIRTY))
					it->v_->prepare(it->data_, it->index_);
				v->check(it->data_, it->index_);
			}
		}
		it->v_ = v;

		return &(*it);

	} else {
		// Not cached: allocate a block and fill it.
		inc_miss_counter(flags);

		block *b = new_block(index);
		if (b) {
			if (flags & GF_ZERO)
				zero_block(*b);
			else {
				// Synchronous read: issue and wait, then
				// verify the data.
				issue_read(*b);
				wait_specific(*b);
				v->check(b->data_, b->index_);
			}

			b->v_ = v;
		}

		return (!b || b->error_) ? NULL : b;
	}
}
|
2014-07-28 18:43:28 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Acquire block |index| for use.  |flags| is a combination of GF_ZERO
// (zero instead of read), GF_DIRTY (caller will modify the data) and
// GF_BARRIER (flush when released).  Multiple concurrent readers are
// allowed, but a write-style acquisition of an already-held block throws.
// Throws runtime_error if the index is out of bounds or the block can't
// be obtained.
block_cache::block &
block_cache::get(block_address index, unsigned flags, validator::ptr v)
{
	check_index(index);

	block *b = lookup_or_read_block(index, flags, v);

	if (b) {
		// Disallow write access to a block somebody else holds.
		if (b->ref_count_ && (flags & (GF_DIRTY | GF_ZERO))) {
			std::ostringstream out;
			out << "attempt to write lock block " << index << " concurrently";
			throw std::runtime_error(out.str());
		}

		// FIXME: this gets called even for new blocks
		hit(*b);

		// First holder: the block becomes locked.
		if (!b->ref_count_)
			nr_locked_++;

		b->ref_count_++;

		if (flags & GF_BARRIER)
			b->set_flags(BF_FLUSH);

		if (flags & GF_DIRTY)
			b->set_flags(BF_DIRTY);

		return *b;
	}

	std::ostringstream out;
	out << "couldn't get block " << index;
	throw std::runtime_error(out.str());
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Start background writeback when the pool of non-dirty blocks drops
// below the low threshold, writing enough to climb back to the high
// threshold.
// NOTE(review): all operands are unsigned — if nr_dirty_ - nr_io_pending_
// ever exceeded nr_cache_blocks_ the subtraction would wrap; verify the
// counters maintain that invariant.
void
block_cache::preemptive_writeback()
{
	unsigned nr_available = nr_cache_blocks_ - (nr_dirty_ - nr_io_pending_);
	if (nr_available < (WRITEBACK_LOW_THRESHOLD_PERCENT * nr_cache_blocks_ / 100))
		writeback((WRITEBACK_HIGH_THRESHOLD_PERCENT * nr_cache_blocks_ / 100) - nr_available);

}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
// Called when a block's last holder lets go (ref_count_ already back to
// zero).  Moves a newly-dirtied block onto the dirty list, honours a
// pending GF_BARRIER flush, and may kick off preemptive writeback.
void
block_cache::release(block_cache::block &b)
{
	assert(!b.ref_count_);

	nr_locked_--;

	// Barrier requested on a block that was never dirtied: flush now.
	if (b.test_flags(BF_FLUSH))
		flush();

	if (b.test_flags(BF_DIRTY)) {
		// PREVIOUSLY_DIRTY tracks whether the block already sits on
		// the dirty list from an earlier acquisition, so it isn't
		// inserted (and counted) twice.
		if (!b.test_flags(BF_PREVIOUSLY_DIRTY)) {
			b.unlink();
			dirty_.push_back(b);
			nr_dirty_++;
			b.set_flags(BF_PREVIOUSLY_DIRTY);
		}

		if (b.test_flags(BF_FLUSH))
			flush();
		else
			preemptive_writeback();

		b.clear_flags(BF_FLUSH);
	}
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
int
|
|
|
|
block_cache::flush()
|
|
|
|
{
|
2016-02-04 14:27:41 +05:30
|
|
|
while (!dirty_.empty()) {
|
|
|
|
block &b = dirty_.front();
|
|
|
|
if (b.ref_count_ || b.test_flags(BF_IO_PENDING))
|
2014-07-30 16:57:33 +05:30
|
|
|
// The superblock may well be still locked.
|
|
|
|
continue;
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
issue_write(b);
|
2014-07-22 21:13:44 +05:30
|
|
|
}
|
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
wait_all();
|
2014-07-28 18:43:28 +05:30
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
return errored_.empty() ? 0 : -EIO;
|
2014-07-30 16:57:33 +05:30
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
void
|
|
|
|
block_cache::prefetch(block_address index)
|
|
|
|
{
|
|
|
|
check_index(index);
|
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
auto it = block_set_.find(index, cmp_index());
|
|
|
|
|
|
|
|
if (it == block_set_.end()) {
|
2014-07-31 16:48:01 +05:30
|
|
|
prefetches_++;
|
|
|
|
|
2016-02-04 14:27:41 +05:30
|
|
|
block *b = new_block(index);
|
2014-07-30 16:57:33 +05:30
|
|
|
if (b)
|
|
|
|
issue_read(*b);
|
2014-07-22 21:13:44 +05:30
|
|
|
}
|
2014-07-30 16:57:33 +05:30
|
|
|
}
|
2014-07-22 21:13:44 +05:30
|
|
|
|
2014-07-30 16:57:33 +05:30
|
|
|
void
|
|
|
|
block_cache::check_index(block_address index) const
|
|
|
|
{
|
|
|
|
if (index >= nr_data_blocks_) {
|
|
|
|
std::ostringstream out;
|
|
|
|
out << "block out of bounds ("
|
|
|
|
<< index << " >= " << nr_data_blocks_ << ")\n";
|
|
|
|
throw std::runtime_error(out.str());
|
2014-07-22 21:13:44 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-25 15:05:04 +05:30
|
|
|
//----------------------------------------------------------------
|