From eec40579d84873dfb7021eb24c50360f073237c5 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Mon, 3 Mar 2014 10:23:15 -0500 Subject: dm: add era target dm-era is a target that behaves similar to the linear target. In addition it keeps track of which blocks were written within a user defined period of time called an 'era'. Each era target instance maintains the current era as a monotonically increasing 32-bit counter. Use cases include tracking changed blocks for backup software, and partially invalidating the contents of a cache to restore cache coherency after rolling back a vendor snapshot. dm-era is primarily expected to be paired with the dm-cache target. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer diff --git a/Documentation/device-mapper/era.txt b/Documentation/device-mapper/era.txt new file mode 100644 index 0000000..3c6d01b --- /dev/null +++ b/Documentation/device-mapper/era.txt @@ -0,0 +1,108 @@ +Introduction +============ + +dm-era is a target that behaves similar to the linear target. In +addition it keeps track of which blocks were written within a user +defined period of time called an 'era'. Each era target instance +maintains the current era as a monotonically increasing 32-bit +counter. + +Use cases include tracking changed blocks for backup software, and +partially invalidating the contents of a cache to restore cache +coherency after rolling back a vendor snapshot. + +Constructor +=========== + + era + + metadata dev : fast device holding the persistent metadata + origin dev : device holding data blocks that may change + block size : block size of origin data device, granularity that is + tracked by the target + +Messages +======== + +None of the dm messages take any arguments. + +checkpoint +---------- + +Possibly move to a new era. You shouldn't assume the era has +incremented. After sending this message, you should check the +current era via the status line. + +take_metadata_snap +------------------ + +Create a clone of the metadata, to allow a userland process to read it. + +drop_metadata_snap +------------------ + +Drop the metadata snapshot. + +Status +====== + + <#used metadata blocks>/<#total metadata blocks> + + +metadata block size : Fixed block size for each metadata block in + sectors +#used metadata blocks : Number of metadata blocks used +#total metadata blocks : Total number of metadata blocks +current era : The current era +held metadata root : The location, in blocks, of the metadata root + that has been 'held' for userspace read + access. '-' indicates there is no held root + +Detailed use case +================= + +The scenario of invalidating a cache when rolling back a vendor +snapshot was the primary use case when developing this target: + +Taking a vendor snapshot +------------------------ + +- Send a checkpoint message to the era target +- Make a note of the current era in its status line +- Take vendor snapshot (the era and snapshot should be forever + associated now). + +Rolling back to an vendor snapshot +---------------------------------- + +- Cache enters passthrough mode (see: dm-cache's docs in cache.txt) +- Rollback vendor storage +- Take metadata snapshot +- Ascertain which blocks have been written since the snapshot was taken + by checking each block's era +- Invalidate those blocks in the caching software +- Cache returns to writeback/writethrough mode + +Memory usage +============ + +The target uses a bitset to record writes in the current era. It also +has a spare bitset ready for switching over to a new era. 
Other than +that it uses a few 4k blocks for updating metadata. + + (4 * nr_blocks) bytes + buffers + +Resilience +========== + +Metadata is updated on disk before a write to a previously unwritten +block is performed. As such dm-era should not be effected by a hard +crash such as power failure. + +Userland tools +============== + +Userland tools are found in the increasingly poorly named +thin-provisioning-tools project: + + https://github.com/jthornber/thin-provisioning-tools diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 95ad936..5bdedf6 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -285,6 +285,17 @@ config DM_CACHE_CLEANER A simple cache policy that writes back all data to the origin. Used when decommissioning a dm-cache. +config DM_ERA + tristate "Era target (EXPERIMENTAL)" + depends on BLK_DEV_DM + default n + select DM_PERSISTENT_DATA + select DM_BIO_PRISON + ---help--- + dm-era tracks which parts of a block device are written to + over time. Useful for maintaining cache coherency when using + vendor snapshots. + config DM_MIRROR tristate "Mirror target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f26d832..a2da532 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -14,6 +14,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o dm-cache-mq-y += dm-cache-policy-mq.o dm-cache-cleaner-y += dm-cache-policy-cleaner.o +dm-era-y += dm-era-target.o md-mod-y += md.o bitmap.o raid456-y += raid5.o @@ -53,6 +54,7 @@ obj-$(CONFIG_DM_VERITY) += dm-verity.o obj-$(CONFIG_DM_CACHE) += dm-cache.o obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o +obj-$(CONFIG_DM_ERA) += dm-era.o ifeq ($(CONFIG_DM_UEVENT),y) dm-mod-objs += dm-uevent.o diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c new file mode 100644 index 0000000..03d9560 --- /dev/null +++ b/drivers/md/dm-era-target.c @@ -0,0 +1,1730 @@ +#include "dm.h" +#include "persistent-data/dm-transaction-manager.h" +#include "persistent-data/dm-bitset.h" +#include "persistent-data/dm-space-map.h" + +#include +#include +#include +#include +#include +#include +#include + +#define DM_MSG_PREFIX "era" + +#define SUPERBLOCK_LOCATION 0 +#define SUPERBLOCK_MAGIC 2126579579 +#define SUPERBLOCK_CSUM_XOR 146538381 +#define MIN_ERA_VERSION 1 +#define MAX_ERA_VERSION 1 +#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION +#define MIN_BLOCK_SIZE 8 + +/*---------------------------------------------------------------- + * Writeset + *--------------------------------------------------------------*/ +struct writeset_metadata { + uint32_t nr_bits; + dm_block_t root; +}; + +struct writeset { + struct writeset_metadata md; + + /* + * An in core copy of the bits to save constantly doing look ups on + * disk. + */ + unsigned long *bits; +}; + +/* + * This does not free off the on disk bitset as this will normally be done + * after digesting into the era array. + */ +static void writeset_free(struct writeset *ws) +{ + vfree(ws->bits); +} + +static int setup_on_disk_bitset(struct dm_disk_bitset *info, + unsigned nr_bits, dm_block_t *root) +{ + int r; + + r = dm_bitset_empty(info, root); + if (r) + return r; + + return dm_bitset_resize(info, *root, 0, nr_bits, false, root); +} + +static size_t bitset_size(unsigned nr_bits) +{ + return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG); +} + +/* + * Allocates memory for the in core bitset. 
+ */ +static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) +{ + ws->md.nr_bits = nr_blocks; + ws->md.root = INVALID_WRITESET_ROOT; + ws->bits = vzalloc(bitset_size(nr_blocks)); + if (!ws->bits) { + DMERR("%s: couldn't allocate in memory bitset", __func__); + return -ENOMEM; + } + + return 0; +} + +/* + * Wipes the in-core bitset, and creates a new on disk bitset. + */ +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) +{ + int r; + + memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); + + r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); + if (r) { + DMERR("%s: setup_on_disk_bitset failed", __func__); + return r; + } + + return 0; +} + +static bool writeset_marked(struct writeset *ws, dm_block_t block) +{ + return test_bit(block, ws->bits); +} + +static int writeset_marked_on_disk(struct dm_disk_bitset *info, + struct writeset_metadata *m, dm_block_t block, + bool *result) +{ + dm_block_t old = m->root; + + /* + * The bitset was flushed when it was archived, so we know there'll + * be no change to the root. + */ + int r = dm_bitset_test_bit(info, m->root, block, &m->root, result); + if (r) { + DMERR("%s: dm_bitset_test_bit failed", __func__); + return r; + } + + BUG_ON(m->root != old); + + return r; +} + +/* + * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was. + */ +static int writeset_test_and_set(struct dm_disk_bitset *info, + struct writeset *ws, uint32_t block) +{ + int r; + + if (!test_and_set_bit(block, ws->bits)) { + r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); + if (r) { + /* FIXME: fail mode */ + return r; + } + + return 0; + } + + return 1; +} + +/*---------------------------------------------------------------- + * On disk metadata layout + *--------------------------------------------------------------*/ +#define SPACE_MAP_ROOT_SIZE 128 +#define UUID_LEN 16 + +struct writeset_disk { + __le32 nr_bits; + __le64 root; +} __packed; + +struct superblock_disk { + __le32 csum; + __le32 flags; + __le64 blocknr; + + __u8 uuid[UUID_LEN]; + __le64 magic; + __le32 version; + + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; + + __le32 data_block_size; + __le32 metadata_block_size; + __le32 nr_blocks; + + __le32 current_era; + struct writeset_disk current_writeset; + + /* + * Only these two fields are valid within the metadata snapshot. 
+ */ + __le64 writeset_tree_root; + __le64 era_array_root; + + __le64 metadata_snap; +} __packed; + +/*---------------------------------------------------------------- + * Superblock validation + *--------------------------------------------------------------*/ +static void sb_prepare_for_write(struct dm_block_validator *v, + struct dm_block *b, + size_t sb_block_size) +{ + struct superblock_disk *disk = dm_block_data(b); + + disk->blocknr = cpu_to_le64(dm_block_location(b)); + disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags, + sb_block_size - sizeof(__le32), + SUPERBLOCK_CSUM_XOR)); +} + +static int check_metadata_version(struct superblock_disk *disk) +{ + uint32_t metadata_version = le32_to_cpu(disk->version); + if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) { + DMERR("Era metadata version %u found, but only versions between %u and %u supported.", + metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION); + return -EINVAL; + } + + return 0; +} + +static int sb_check(struct dm_block_validator *v, + struct dm_block *b, + size_t sb_block_size) +{ + struct superblock_disk *disk = dm_block_data(b); + __le32 csum_le; + + if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) { + DMERR("sb_check failed: blocknr %llu: wanted %llu", + le64_to_cpu(disk->blocknr), + (unsigned long long)dm_block_location(b)); + return -ENOTBLK; + } + + if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) { + DMERR("sb_check failed: magic %llu: wanted %llu", + le64_to_cpu(disk->magic), + (unsigned long long) SUPERBLOCK_MAGIC); + return -EILSEQ; + } + + csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags, + sb_block_size - sizeof(__le32), + SUPERBLOCK_CSUM_XOR)); + if (csum_le != disk->csum) { + DMERR("sb_check failed: csum %u: wanted %u", + le32_to_cpu(csum_le), le32_to_cpu(disk->csum)); + return -EILSEQ; + } + + return check_metadata_version(disk); +} + +static struct dm_block_validator sb_validator = { + .name = "superblock", + .prepare_for_write = sb_prepare_for_write, + .check = sb_check +}; + +/*---------------------------------------------------------------- + * Low level metadata handling + *--------------------------------------------------------------*/ +#define DM_ERA_METADATA_BLOCK_SIZE 4096 +#define DM_ERA_METADATA_CACHE_SIZE 64 +#define ERA_MAX_CONCURRENT_LOCKS 5 + +struct era_metadata { + struct block_device *bdev; + struct dm_block_manager *bm; + struct dm_space_map *sm; + struct dm_transaction_manager *tm; + + dm_block_t block_size; + uint32_t nr_blocks; + + uint32_t current_era; + + /* + * We preallocate 2 writesets. When an era rolls over we + * switch between them. This means the allocation is done at + * preresume time, rather than on the io path. + */ + struct writeset writesets[2]; + struct writeset *current_writeset; + + dm_block_t writeset_tree_root; + dm_block_t era_array_root; + + struct dm_disk_bitset bitset_info; + struct dm_btree_info writeset_tree_info; + struct dm_array_info era_array_info; + + dm_block_t metadata_snap; + + /* + * A flag that is set whenever a writeset has been archived. 
+ */ + bool archived_writesets; +}; + +static int superblock_read_lock(struct era_metadata *md, + struct dm_block **sblock) +{ + return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +static int superblock_lock_zero(struct era_metadata *md, + struct dm_block **sblock) +{ + return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +static int superblock_lock(struct era_metadata *md, + struct dm_block **sblock) +{ + return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +/* FIXME: duplication with cache and thin */ +static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result) +{ + int r; + unsigned i; + struct dm_block *b; + __le64 *data_le, zero = cpu_to_le64(0); + unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64); + + /* + * We can't use a validator here - it may be all zeroes. + */ + r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b); + if (r) + return r; + + data_le = dm_block_data(b); + *result = true; + for (i = 0; i < sb_block_size; i++) { + if (data_le[i] != zero) { + *result = false; + break; + } + } + + return dm_bm_unlock(b); +} + +/*----------------------------------------------------------------*/ + +static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk) +{ + disk->nr_bits = cpu_to_le32(core->nr_bits); + disk->root = cpu_to_le64(core->root); +} + +static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core) +{ + core->nr_bits = le32_to_cpu(disk->nr_bits); + core->root = le64_to_cpu(disk->root); +} + +static void ws_inc(void *context, const void *value) +{ + struct era_metadata *md = context; + struct writeset_disk ws_d; + dm_block_t b; + + memcpy(&ws_d, value, sizeof(ws_d)); + b = le64_to_cpu(ws_d.root); + + dm_tm_inc(md->tm, b); +} + +static void ws_dec(void *context, const void *value) +{ + struct era_metadata *md = context; + struct writeset_disk ws_d; + dm_block_t b; + + memcpy(&ws_d, value, sizeof(ws_d)); + b = le64_to_cpu(ws_d.root); + + dm_bitset_del(&md->bitset_info, b); +} + +static int ws_eq(void *context, const void *value1, const void *value2) +{ + return !memcmp(value1, value2, sizeof(struct writeset_metadata)); +} + +/*----------------------------------------------------------------*/ + +static void setup_writeset_tree_info(struct era_metadata *md) +{ + struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type; + md->writeset_tree_info.tm = md->tm; + md->writeset_tree_info.levels = 1; + vt->context = md; + vt->size = sizeof(struct writeset_disk); + vt->inc = ws_inc; + vt->dec = ws_dec; + vt->equal = ws_eq; +} + +static void setup_era_array_info(struct era_metadata *md) + +{ + struct dm_btree_value_type vt; + vt.context = NULL; + vt.size = sizeof(__le32); + vt.inc = NULL; + vt.dec = NULL; + vt.equal = NULL; + + dm_array_info_init(&md->era_array_info, md->tm, &vt); +} + +static void setup_infos(struct era_metadata *md) +{ + dm_disk_bitset_init(md->tm, &md->bitset_info); + setup_writeset_tree_info(md); + setup_era_array_info(md); +} + +/*----------------------------------------------------------------*/ + +static int create_fresh_metadata(struct era_metadata *md) +{ + int r; + + r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION, + &md->tm, &md->sm); + if (r < 0) { + DMERR("dm_tm_create_with_sm failed"); + return r; + } + + setup_infos(md); + + r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root); + if (r) { + DMERR("couldn't create new writeset tree"); + goto bad; 
+ } + + r = dm_array_empty(&md->era_array_info, &md->era_array_root); + if (r) { + DMERR("couldn't create era array"); + goto bad; + } + + return 0; + +bad: + dm_sm_destroy(md->sm); + dm_tm_destroy(md->tm); + + return r; +} + +/* + * Writes a superblock, including the static fields that don't get updated + * with every commit (possible optimisation here). 'md' should be fully + * constructed when this is called. + */ +static int prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) +{ + int r; + size_t metadata_len; + + disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC); + disk->flags = cpu_to_le32(0ul); + + /* FIXME: can't keep blanking the uuid (uuid is currently unused though) */ + memset(disk->uuid, 0, sizeof(disk->uuid)); + disk->version = cpu_to_le32(MAX_ERA_VERSION); + + r = dm_sm_root_size(md->sm, &metadata_len); + if (r < 0) + return r; + + r = dm_sm_copy_root(md->sm, &disk->metadata_space_map_root, + metadata_len); + if (r < 0) + return r; + + disk->data_block_size = cpu_to_le32(md->block_size); + disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + disk->nr_blocks = cpu_to_le32(md->nr_blocks); + disk->current_era = cpu_to_le32(md->current_era); + + ws_pack(&md->current_writeset->md, &disk->current_writeset); + disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root); + disk->era_array_root = cpu_to_le64(md->era_array_root); + disk->metadata_snap = cpu_to_le64(md->metadata_snap); + + return 0; +} + +static int write_superblock(struct era_metadata *md) +{ + int r; + struct dm_block *sblock; + struct superblock_disk *disk; + + r = superblock_lock_zero(md, &sblock); + if (r) + return r; + + disk = dm_block_data(sblock); + r = prepare_superblock(md, disk); + if (r) { + DMERR("%s: prepare_superblock failed", __func__); + dm_bm_unlock(sblock); /* FIXME: does this commit? */ + return r; + } + + return dm_tm_commit(md->tm, sblock); +} + +/* + * Assumes block_size and the infos are set. + */ +static int format_metadata(struct era_metadata *md) +{ + int r; + + r = create_fresh_metadata(md); + if (r) + return r; + + r = write_superblock(md); + if (r) { + dm_sm_destroy(md->sm); + dm_tm_destroy(md->tm); + return r; + } + + return 0; +} + +static int open_metadata(struct era_metadata *md) +{ + int r; + struct dm_block *sblock; + struct superblock_disk *disk; + + r = superblock_read_lock(md, &sblock); + if (r) { + DMERR("couldn't read_lock superblock"); + return r; + } + + disk = dm_block_data(sblock); + r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, + disk->metadata_space_map_root, + sizeof(disk->metadata_space_map_root), + &md->tm, &md->sm); + if (r) { + DMERR("dm_tm_open_with_sm failed"); + goto bad; + } + + setup_infos(md); + + md->block_size = le32_to_cpu(disk->data_block_size); + md->nr_blocks = le32_to_cpu(disk->nr_blocks); + md->current_era = le32_to_cpu(disk->current_era); + + md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); + md->era_array_root = le64_to_cpu(disk->era_array_root); + md->metadata_snap = le64_to_cpu(disk->metadata_snap); + md->archived_writesets = true; + + return dm_bm_unlock(sblock); + +bad: + dm_bm_unlock(sblock); + return r; +} + +static int open_or_format_metadata(struct era_metadata *md, + bool may_format) +{ + int r; + bool unformatted = false; + + r = superblock_all_zeroes(md->bm, &unformatted); + if (r) + return r; + + if (unformatted) + return may_format ? 
format_metadata(md) : -EPERM; + + return open_metadata(md); +} + +static int create_persistent_data_objects(struct era_metadata *md, + bool may_format) +{ + int r; + + md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE, + DM_ERA_METADATA_CACHE_SIZE, + ERA_MAX_CONCURRENT_LOCKS); + if (IS_ERR(md->bm)) { + DMERR("could not create block manager"); + return PTR_ERR(md->bm); + } + + r = open_or_format_metadata(md, may_format); + if (r) + dm_block_manager_destroy(md->bm); + + return r; +} + +static void destroy_persistent_data_objects(struct era_metadata *md) +{ + dm_sm_destroy(md->sm); + dm_tm_destroy(md->tm); + dm_block_manager_destroy(md->bm); +} + +/* + * This waits until all era_map threads have picked up the new filter. + */ +static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset) +{ + rcu_assign_pointer(md->current_writeset, new_writeset); + synchronize_rcu(); +} + +/*---------------------------------------------------------------- + * Writesets get 'digested' into the main era array. + * + * We're using a coroutine here so the worker thread can do the digestion, + * thus avoiding synchronisation of the metadata. Digesting a whole + * writeset in one go would cause too much latency. + *--------------------------------------------------------------*/ +struct digest { + uint32_t era; + unsigned nr_bits, current_bit; + struct writeset_metadata writeset; + __le32 value; + struct dm_disk_bitset info; + + int (*step)(struct era_metadata *, struct digest *); +}; + +static int metadata_digest_lookup_writeset(struct era_metadata *md, + struct digest *d); + +static int metadata_digest_remove_writeset(struct era_metadata *md, + struct digest *d) +{ + int r; + uint64_t key = d->era; + + r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root, + &key, &md->writeset_tree_root); + if (r) { + DMERR("%s: dm_btree_remove failed", __func__); + return r; + } + + d->step = metadata_digest_lookup_writeset; + return 0; +} + +#define INSERTS_PER_STEP 100 + +static int metadata_digest_transcribe_writeset(struct era_metadata *md, + struct digest *d) +{ + int r; + bool marked; + unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits); + + for (b = d->current_bit; b < e; b++) { + r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked); + if (r) { + DMERR("%s: writeset_marked_on_disk failed", __func__); + return r; + } + + if (!marked) + continue; + + __dm_bless_for_disk(&d->value); + r = dm_array_set_value(&md->era_array_info, md->era_array_root, + b, &d->value, &md->era_array_root); + if (r) { + DMERR("%s: dm_array_set_value failed", __func__); + return r; + } + } + + if (b == d->nr_bits) + d->step = metadata_digest_remove_writeset; + else + d->current_bit = b; + + return 0; +} + +static int metadata_digest_lookup_writeset(struct era_metadata *md, + struct digest *d) +{ + int r; + uint64_t key; + struct writeset_disk disk; + + r = dm_btree_find_lowest_key(&md->writeset_tree_info, + md->writeset_tree_root, &key); + if (r < 0) + return r; + + d->era = key; + + r = dm_btree_lookup(&md->writeset_tree_info, + md->writeset_tree_root, &key, &disk); + if (r) { + if (r == -ENODATA) { + d->step = NULL; + return 0; + } + + DMERR("%s: dm_btree_lookup failed", __func__); + return r; + } + + ws_unpack(&disk, &d->writeset); + d->value = cpu_to_le32(key); + + d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); + d->current_bit = 0; + d->step = metadata_digest_transcribe_writeset; + + return 0; +} + +static int metadata_digest_start(struct era_metadata 
*md, struct digest *d) +{ + if (d->step) + return 0; + + memset(d, 0, sizeof(*d)); + + /* + * We initialise another bitset info to avoid any caching side + * effects with the previous one. + */ + dm_disk_bitset_init(md->tm, &d->info); + d->step = metadata_digest_lookup_writeset; + + return 0; +} + +/*---------------------------------------------------------------- + * High level metadata interface. Target methods should use these, and not + * the lower level ones. + *--------------------------------------------------------------*/ +static struct era_metadata *metadata_open(struct block_device *bdev, + sector_t block_size, + bool may_format) +{ + int r; + struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL); + + if (!md) + return NULL; + + md->bdev = bdev; + md->block_size = block_size; + + md->writesets[0].md.root = INVALID_WRITESET_ROOT; + md->writesets[1].md.root = INVALID_WRITESET_ROOT; + md->current_writeset = &md->writesets[0]; + + r = create_persistent_data_objects(md, may_format); + if (r) { + kfree(md); + return ERR_PTR(r); + } + + return md; +} + +static void metadata_close(struct era_metadata *md) +{ + destroy_persistent_data_objects(md); + kfree(md); +} + +static bool valid_nr_blocks(dm_block_t n) +{ + /* + * dm_bitset restricts us to 2^32. test_bit & co. restrict us + * further to 2^31 - 1 + */ + return n < (1ull << 31); +} + +static int metadata_resize(struct era_metadata *md, void *arg) +{ + int r; + dm_block_t *new_size = arg; + __le32 value; + + if (!valid_nr_blocks(*new_size)) { + DMERR("Invalid number of origin blocks %llu", + (unsigned long long) *new_size); + return -EINVAL; + } + + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); + + r = writeset_alloc(&md->writesets[0], *new_size); + if (r) { + DMERR("%s: writeset_alloc failed for writeset 0", __func__); + return r; + } + + r = writeset_alloc(&md->writesets[1], *new_size); + if (r) { + DMERR("%s: writeset_alloc failed for writeset 1", __func__); + return r; + } + + value = cpu_to_le32(0u); + __dm_bless_for_disk(&value); + r = dm_array_resize(&md->era_array_info, md->era_array_root, + md->nr_blocks, *new_size, + &value, &md->era_array_root); + if (r) { + DMERR("%s: dm_array_resize failed", __func__); + return r; + } + + md->nr_blocks = *new_size; + return 0; +} + +static int metadata_era_archive(struct era_metadata *md) +{ + int r; + uint64_t keys[1]; + struct writeset_disk value; + + r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, + &md->current_writeset->md.root); + if (r) { + DMERR("%s: dm_bitset_flush failed", __func__); + return r; + } + + ws_pack(&md->current_writeset->md, &value); + md->current_writeset->md.root = INVALID_WRITESET_ROOT; + + keys[0] = md->current_era; + __dm_bless_for_disk(&value); + r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root, + keys, &value, &md->writeset_tree_root); + if (r) { + DMERR("%s: couldn't insert writeset into btree", __func__); + /* FIXME: fail mode */ + return r; + } + + md->archived_writesets = true; + + return 0; +} + +static struct writeset *next_writeset(struct era_metadata *md) +{ + return (md->current_writeset == &md->writesets[0]) ? 
+ &md->writesets[1] : &md->writesets[0]; +} + +static int metadata_new_era(struct era_metadata *md) +{ + int r; + struct writeset *new_writeset = next_writeset(md); + + r = writeset_init(&md->bitset_info, new_writeset); + if (r) { + DMERR("%s: writeset_init failed", __func__); + return r; + } + + swap_writeset(md, new_writeset); + md->current_era++; + + return 0; +} + +static int metadata_era_rollover(struct era_metadata *md) +{ + int r; + + if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { + r = metadata_era_archive(md); + if (r) { + DMERR("%s: metadata_archive_era failed", __func__); + /* FIXME: fail mode? */ + return r; + } + } + + r = metadata_new_era(md); + if (r) { + DMERR("%s: new era failed", __func__); + /* FIXME: fail mode */ + return r; + } + + return 0; +} + +static bool metadata_current_marked(struct era_metadata *md, dm_block_t block) +{ + bool r; + struct writeset *ws; + + rcu_read_lock(); + ws = rcu_dereference(md->current_writeset); + r = writeset_marked(ws, block); + rcu_read_unlock(); + + return r; +} + +static int metadata_commit(struct era_metadata *md) +{ + int r; + struct dm_block *sblock; + + if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { + r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, + &md->current_writeset->md.root); + if (r) { + DMERR("%s: bitset flush failed", __func__); + return r; + } + } + + r = dm_tm_pre_commit(md->tm); + if (r) { + DMERR("%s: pre commit failed", __func__); + return r; + } + + r = superblock_lock(md, &sblock); + if (r) { + DMERR("%s: superblock lock failed", __func__); + return r; + } + + r = prepare_superblock(md, dm_block_data(sblock)); + if (r) { + DMERR("%s: prepare_superblock failed", __func__); + dm_bm_unlock(sblock); /* FIXME: does this commit? */ + return r; + } + + return dm_tm_commit(md->tm, sblock); +} + +static int metadata_checkpoint(struct era_metadata *md) +{ + /* + * For now we just rollover, but later I want to put a check in to + * avoid this if the filter is still pretty fresh. + */ + return metadata_era_rollover(md); +} + +/* + * Metadata snapshots allow userland to access era data. 
+ */ +static int metadata_take_snap(struct era_metadata *md) +{ + int r, inc; + struct dm_block *clone; + + if (md->metadata_snap != SUPERBLOCK_LOCATION) { + DMERR("%s: metadata snapshot already exists", __func__); + return -EINVAL; + } + + r = metadata_era_rollover(md); + if (r) { + DMERR("%s: era rollover failed", __func__); + return r; + } + + r = metadata_commit(md); + if (r) { + DMERR("%s: pre commit failed", __func__); + return r; + } + + r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION); + if (r) { + DMERR("%s: couldn't increment superblock", __func__); + return r; + } + + r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION, + &sb_validator, &clone, &inc); + if (r) { + DMERR("%s: couldn't shadow superblock", __func__); + dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION); + return r; + } + BUG_ON(!inc); + + r = dm_sm_inc_block(md->sm, md->writeset_tree_root); + if (r) { + DMERR("%s: couldn't inc writeset tree root", __func__); + dm_tm_unlock(md->tm, clone); + return r; + } + + r = dm_sm_inc_block(md->sm, md->era_array_root); + if (r) { + DMERR("%s: couldn't inc era tree root", __func__); + dm_sm_dec_block(md->sm, md->writeset_tree_root); + dm_tm_unlock(md->tm, clone); + return r; + } + + md->metadata_snap = dm_block_location(clone); + + r = dm_tm_unlock(md->tm, clone); + if (r) { + DMERR("%s: couldn't unlock clone", __func__); + md->metadata_snap = SUPERBLOCK_LOCATION; + return r; + } + + return 0; +} + +static int metadata_drop_snap(struct era_metadata *md) +{ + int r; + dm_block_t location; + struct dm_block *clone; + struct superblock_disk *disk; + + if (md->metadata_snap == SUPERBLOCK_LOCATION) { + DMERR("%s: no snap to drop", __func__); + return -EINVAL; + } + + r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone); + if (r) { + DMERR("%s: couldn't read lock superblock clone", __func__); + return r; + } + + /* + * Whatever happens now we'll commit with no record of the metadata + * snap. 
+ */ + md->metadata_snap = SUPERBLOCK_LOCATION; + + disk = dm_block_data(clone); + r = dm_btree_del(&md->writeset_tree_info, + le64_to_cpu(disk->writeset_tree_root)); + if (r) { + DMERR("%s: error deleting writeset tree clone", __func__); + dm_tm_unlock(md->tm, clone); + return r; + } + + r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root)); + if (r) { + DMERR("%s: error deleting era array clone", __func__); + dm_tm_unlock(md->tm, clone); + return r; + } + + location = dm_block_location(clone); + dm_tm_unlock(md->tm, clone); + + return dm_sm_dec_block(md->sm, location); +} + +struct metadata_stats { + dm_block_t used; + dm_block_t total; + dm_block_t snap; + uint32_t era; +}; + +static int metadata_get_stats(struct era_metadata *md, void *ptr) +{ + int r; + struct metadata_stats *s = ptr; + dm_block_t nr_free, nr_total; + + r = dm_sm_get_nr_free(md->sm, &nr_free); + if (r) { + DMERR("dm_sm_get_nr_free returned %d", r); + return r; + } + + r = dm_sm_get_nr_blocks(md->sm, &nr_total); + if (r) { + DMERR("dm_pool_get_metadata_dev_size returned %d", r); + return r; + } + + s->used = nr_total - nr_free; + s->total = nr_total; + s->snap = md->metadata_snap; + s->era = md->current_era; + + return 0; +} + +/*----------------------------------------------------------------*/ + +struct era { + struct dm_target *ti; + struct dm_target_callbacks callbacks; + + struct dm_dev *metadata_dev; + struct dm_dev *origin_dev; + + dm_block_t nr_blocks; + uint32_t sectors_per_block; + int sectors_per_block_shift; + struct era_metadata *md; + + struct workqueue_struct *wq; + struct work_struct worker; + + spinlock_t deferred_lock; + struct bio_list deferred_bios; + + spinlock_t rpc_lock; + struct list_head rpc_calls; + + struct digest digest; + atomic_t suspended; +}; + +struct rpc { + struct list_head list; + + int (*fn0)(struct era_metadata *); + int (*fn1)(struct era_metadata *, void *); + void *arg; + int result; + + struct completion complete; +}; + +/*---------------------------------------------------------------- + * Remapping. 
+ *---------------------------------------------------------------*/ +static bool block_size_is_power_of_two(struct era *era) +{ + return era->sectors_per_block_shift >= 0; +} + +static dm_block_t get_block(struct era *era, struct bio *bio) +{ + sector_t block_nr = bio->bi_iter.bi_sector; + + if (!block_size_is_power_of_two(era)) + (void) sector_div(block_nr, era->sectors_per_block); + else + block_nr >>= era->sectors_per_block_shift; + + return block_nr; +} + +static void remap_to_origin(struct era *era, struct bio *bio) +{ + bio->bi_bdev = era->origin_dev->bdev; +} + +/*---------------------------------------------------------------- + * Worker thread + *--------------------------------------------------------------*/ +static void wake_worker(struct era *era) +{ + if (!atomic_read(&era->suspended)) + queue_work(era->wq, &era->worker); +} + +static void process_old_eras(struct era *era) +{ + int r; + + if (!era->digest.step) + return; + + r = era->digest.step(era->md, &era->digest); + if (r < 0) { + DMERR("%s: digest step failed, stopping digestion", __func__); + era->digest.step = NULL; + + } else if (era->digest.step) + wake_worker(era); +} + +static void process_deferred_bios(struct era *era) +{ + int r; + struct bio_list deferred_bios, marked_bios; + struct bio *bio; + bool commit_needed = false; + bool failed = false; + + bio_list_init(&deferred_bios); + bio_list_init(&marked_bios); + + spin_lock(&era->deferred_lock); + bio_list_merge(&deferred_bios, &era->deferred_bios); + bio_list_init(&era->deferred_bios); + spin_unlock(&era->deferred_lock); + + while ((bio = bio_list_pop(&deferred_bios))) { + r = writeset_test_and_set(&era->md->bitset_info, + era->md->current_writeset, + get_block(era, bio)); + if (r < 0) { + /* + * This is bad news, we need to rollback. + * FIXME: finish. + */ + failed = true; + + } else if (r == 0) + commit_needed = true; + + bio_list_add(&marked_bios, bio); + } + + if (commit_needed) { + r = metadata_commit(era->md); + if (r) + failed = true; + } + + if (failed) + while ((bio = bio_list_pop(&marked_bios))) + bio_io_error(bio); + else + while ((bio = bio_list_pop(&marked_bios))) + generic_make_request(bio); +} + +static void process_rpc_calls(struct era *era) +{ + int r; + bool need_commit = false; + struct list_head calls; + struct rpc *rpc, *tmp; + + INIT_LIST_HEAD(&calls); + spin_lock(&era->rpc_lock); + list_splice_init(&era->rpc_calls, &calls); + spin_unlock(&era->rpc_lock); + + list_for_each_entry_safe(rpc, tmp, &calls, list) { + rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); + need_commit = true; + } + + if (need_commit) { + r = metadata_commit(era->md); + if (r) + list_for_each_entry_safe(rpc, tmp, &calls, list) + rpc->result = r; + } + + list_for_each_entry_safe(rpc, tmp, &calls, list) + complete(&rpc->complete); +} + +static void kick_off_digest(struct era *era) +{ + if (era->md->archived_writesets) { + era->md->archived_writesets = false; + metadata_digest_start(era->md, &era->digest); + } +} + +static void do_work(struct work_struct *ws) +{ + struct era *era = container_of(ws, struct era, worker); + + kick_off_digest(era); + process_old_eras(era); + process_deferred_bios(era); + process_rpc_calls(era); +} + +static void defer_bio(struct era *era, struct bio *bio) +{ + spin_lock(&era->deferred_lock); + bio_list_add(&era->deferred_bios, bio); + spin_unlock(&era->deferred_lock); + + wake_worker(era); +} + +/* + * Make an rpc call to the worker to change the metadata. 
+ */ +static int perform_rpc(struct era *era, struct rpc *rpc) +{ + rpc->result = 0; + init_completion(&rpc->complete); + + spin_lock(&era->rpc_lock); + list_add(&rpc->list, &era->rpc_calls); + spin_unlock(&era->rpc_lock); + + wake_worker(era); + wait_for_completion(&rpc->complete); + + return rpc->result; +} + +static int in_worker0(struct era *era, int (*fn)(struct era_metadata *)) +{ + struct rpc rpc; + rpc.fn0 = fn; + rpc.fn1 = NULL; + + return perform_rpc(era, &rpc); +} + +static int in_worker1(struct era *era, + int (*fn)(struct era_metadata *, void *), void *arg) +{ + struct rpc rpc; + rpc.fn0 = NULL; + rpc.fn1 = fn; + rpc.arg = arg; + + return perform_rpc(era, &rpc); +} + +static void start_worker(struct era *era) +{ + atomic_set(&era->suspended, 0); +} + +static void stop_worker(struct era *era) +{ + atomic_set(&era->suspended, 1); + flush_workqueue(era->wq); +} + +/*---------------------------------------------------------------- + * Target methods + *--------------------------------------------------------------*/ +static int dev_is_congested(struct dm_dev *dev, int bdi_bits) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + return bdi_congested(&q->backing_dev_info, bdi_bits); +} + +static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) +{ + struct era *era = container_of(cb, struct era, callbacks); + return dev_is_congested(era->origin_dev, bdi_bits); +} + +static void era_destroy(struct era *era) +{ + metadata_close(era->md); + + if (era->wq) + destroy_workqueue(era->wq); + + if (era->origin_dev) + dm_put_device(era->ti, era->origin_dev); + + if (era->metadata_dev) + dm_put_device(era->ti, era->metadata_dev); + + kfree(era); +} + +static dm_block_t calc_nr_blocks(struct era *era) +{ + return dm_sector_div_up(era->ti->len, era->sectors_per_block); +} + +static bool valid_block_size(dm_block_t block_size) +{ + bool greater_than_zero = block_size > 0; + bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0; + + return greater_than_zero && multiple_of_min_block_size; +} + +/* + * + */ +static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + int r; + char dummy; + struct era *era; + struct era_metadata *md; + + if (argc != 3) { + ti->error = "Invalid argument count"; + return -EINVAL; + } + + era = kzalloc(sizeof(*era), GFP_KERNEL); + if (!era) { + ti->error = "Error allocating era structure"; + return -ENOMEM; + } + + era->ti = ti; + + r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); + if (r) { + ti->error = "Error opening metadata device"; + era_destroy(era); + return -EINVAL; + } + + r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); + if (r) { + ti->error = "Error opening data device"; + era_destroy(era); + return -EINVAL; + } + + r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy); + if (r != 1) { + ti->error = "Error parsing block size"; + era_destroy(era); + return -EINVAL; + } + + r = dm_set_target_max_io_len(ti, era->sectors_per_block); + if (r) { + ti->error = "could not set max io len"; + era_destroy(era); + return -EINVAL; + } + + if (!valid_block_size(era->sectors_per_block)) { + ti->error = "Invalid block size"; + era_destroy(era); + return -EINVAL; + } + if (era->sectors_per_block & (era->sectors_per_block - 1)) + era->sectors_per_block_shift = -1; + else + era->sectors_per_block_shift = __ffs(era->sectors_per_block); + + md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); + if (IS_ERR(md)) { + ti->error = "Error 
reading metadata"; + era_destroy(era); + return PTR_ERR(md); + } + era->md = md; + + era->nr_blocks = calc_nr_blocks(era); + + r = metadata_resize(era->md, &era->nr_blocks); + if (r) { + ti->error = "couldn't resize metadata"; + era_destroy(era); + return -ENOMEM; + } + + era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); + if (!era->wq) { + ti->error = "could not create workqueue for metadata object"; + era_destroy(era); + return -ENOMEM; + } + INIT_WORK(&era->worker, do_work); + + spin_lock_init(&era->deferred_lock); + bio_list_init(&era->deferred_bios); + + spin_lock_init(&era->rpc_lock); + INIT_LIST_HEAD(&era->rpc_calls); + + ti->private = era; + ti->num_flush_bios = 1; + ti->flush_supported = true; + + ti->num_discard_bios = 1; + ti->discards_supported = true; + era->callbacks.congested_fn = era_is_congested; + dm_table_add_target_callbacks(ti->table, &era->callbacks); + + return 0; +} + +static void era_dtr(struct dm_target *ti) +{ + era_destroy(ti->private); +} + +static int era_map(struct dm_target *ti, struct bio *bio) +{ + struct era *era = ti->private; + dm_block_t block = get_block(era, bio); + + /* + * All bios get remapped to the origin device. We do this now, but + * it may not get issued until later. Depending on whether the + * block is marked in this era. + */ + remap_to_origin(era, bio); + + /* + * REQ_FLUSH bios carry no data, so we're not interested in them. + */ + if (!(bio->bi_rw & REQ_FLUSH) && + (bio_data_dir(bio) == WRITE) && + !metadata_current_marked(era->md, block)) { + defer_bio(era, bio); + return DM_MAPIO_SUBMITTED; + } + + return DM_MAPIO_REMAPPED; +} + +static void era_postsuspend(struct dm_target *ti) +{ + int r; + struct era *era = ti->private; + + r = in_worker0(era, metadata_era_archive); + if (r) { + DMERR("%s: couldn't archive current era", __func__); + /* FIXME: fail mode */ + } + + stop_worker(era); +} + +static int era_preresume(struct dm_target *ti) +{ + int r; + struct era *era = ti->private; + dm_block_t new_size = calc_nr_blocks(era); + + if (era->nr_blocks != new_size) { + r = in_worker1(era, metadata_resize, &new_size); + if (r) + return r; + + era->nr_blocks = new_size; + } + + start_worker(era); + + r = in_worker0(era, metadata_new_era); + if (r) { + DMERR("%s: metadata_era_rollover failed", __func__); + return r; + } + + return 0; +} + +/* + * Status format: + * + * <#used metadata blocks>/<#total metadata blocks> + * + */ +static void era_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) +{ + int r; + struct era *era = ti->private; + ssize_t sz = 0; + struct metadata_stats stats; + char buf[BDEVNAME_SIZE]; + + switch (type) { + case STATUSTYPE_INFO: + r = in_worker1(era, metadata_get_stats, &stats); + if (r) + goto err; + + DMEMIT("%u %llu/%llu %u", + (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), + (unsigned long long) stats.used, + (unsigned long long) stats.total, + (unsigned) stats.era); + + if (stats.snap != SUPERBLOCK_LOCATION) + DMEMIT(" %llu", stats.snap); + else + DMEMIT(" -"); + break; + + case STATUSTYPE_TABLE: + format_dev_t(buf, era->metadata_dev->bdev->bd_dev); + DMEMIT("%s ", buf); + format_dev_t(buf, era->origin_dev->bdev->bd_dev); + DMEMIT("%s %u", buf, era->sectors_per_block); + break; + } + + return; + +err: + DMEMIT("Error"); +} + +static int era_message(struct dm_target *ti, unsigned argc, char **argv) +{ + struct era *era = ti->private; + + if (argc != 1) { + DMERR("incorrect number of message arguments"); + return -EINVAL; + } + + 
if (!strcasecmp(argv[0], "checkpoint")) + return in_worker0(era, metadata_checkpoint); + + if (!strcasecmp(argv[0], "take_metadata_snap")) + return in_worker0(era, metadata_take_snap); + + if (!strcasecmp(argv[0], "drop_metadata_snap")) + return in_worker0(era, metadata_drop_snap); + + DMERR("unsupported message '%s'", argv[0]); + return -EINVAL; +} + +static sector_t get_dev_size(struct dm_dev *dev) +{ + return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; +} + +static int era_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct era *era = ti->private; + return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); +} + +static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm, + struct bio_vec *biovec, int max_size) +{ + struct era *era = ti->private; + struct request_queue *q = bdev_get_queue(era->origin_dev->bdev); + + if (!q->merge_bvec_fn) + return max_size; + + bvm->bi_bdev = era->origin_dev->bdev; + + return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); +} + +static void era_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ + struct era *era = ti->private; + uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; + + /* + * If the system-determined stacked limits are compatible with the + * era device's blocksize (io_opt is a factor) do not override them. + */ + if (io_opt_sectors < era->sectors_per_block || + do_div(io_opt_sectors, era->sectors_per_block)) { + blk_limits_io_min(limits, 0); + blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); + } +} + +/*----------------------------------------------------------------*/ + +static struct target_type era_target = { + .name = "era", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = era_ctr, + .dtr = era_dtr, + .map = era_map, + .postsuspend = era_postsuspend, + .preresume = era_preresume, + .status = era_status, + .message = era_message, + .iterate_devices = era_iterate_devices, + .merge = era_merge, + .io_hints = era_io_hints +}; + +static int __init dm_era_init(void) +{ + int r; + + r = dm_register_target(&era_target); + if (r) { + DMERR("era target registration failed: %d", r); + return r; + } + + return 0; +} + +static void __exit dm_era_exit(void) +{ + dm_unregister_target(&era_target); +} + +module_init(dm_era_init); +module_exit(dm_era_exit); + +MODULE_DESCRIPTION(DM_NAME " era target"); +MODULE_AUTHOR("Joe Thornber "); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 428e4698642794444cdb26c148a827f22c28d546 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Mon, 3 Mar 2014 10:37:24 -0500 Subject: dm bitset: only flush the current word if it has been dirtied This change offers a big performance boost for dm-era. 
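The pattern is small enough to sketch in isolation (illustrative C only; cached_word and write_word are hypothetical stand-ins for struct dm_disk_bitset and its on-disk update, not the real API): the cached word carries a dirty flag that is set whenever a bit changes, and the flush path returns early when the flag is clear, so unmodified words never cause a metadata write.

  #include <stdbool.h>
  #include <stdint.h>

  struct cached_word {
          uint64_t bits;   /* in-core copy of one on-disk word */
          bool loaded;     /* a word is currently cached */
          bool dirty;      /* cached word differs from what is on disk */
  };

  /* Stand-in for the real on-disk update. */
  static int write_word(const struct cached_word *w)
  {
          (void) w;
          return 0;
  }

  static void cache_set_bit(struct cached_word *w, unsigned b)
  {
          w->bits |= 1ull << b;
          w->dirty = true;         /* a flush is now required */
  }

  static int cache_flush(struct cached_word *w)
  {
          int r;

          if (!w->loaded || !w->dirty)
                  return 0;        /* nothing changed: skip the write */

          r = write_word(w);
          if (r)
                  return r;

          w->loaded = false;
          w->dirty = false;
          return 0;
  }

The patch below does exactly this bookkeeping on info->dirty: read_bits() starts a word out clean, the set/clear paths mark it dirty, and dm_bitset_flush() bails out when it is not.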
Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c index cd9a86d..36f7cc2 100644 --- a/drivers/md/persistent-data/dm-bitset.c +++ b/drivers/md/persistent-data/dm-bitset.c @@ -65,7 +65,7 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root, int r; __le64 value; - if (!info->current_index_set) + if (!info->current_index_set || !info->dirty) return 0; value = cpu_to_le64(info->current_bits); @@ -77,6 +77,8 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root, return r; info->current_index_set = false; + info->dirty = false; + return 0; } EXPORT_SYMBOL_GPL(dm_bitset_flush); @@ -94,6 +96,8 @@ static int read_bits(struct dm_disk_bitset *info, dm_block_t root, info->current_bits = le64_to_cpu(value); info->current_index_set = true; info->current_index = array_index; + info->dirty = false; + return 0; } @@ -126,6 +130,8 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root, return r; set_bit(b, (unsigned long *) &info->current_bits); + info->dirty = true; + return 0; } EXPORT_SYMBOL_GPL(dm_bitset_set_bit); @@ -141,6 +147,8 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root, return r; clear_bit(b, (unsigned long *) &info->current_bits); + info->dirty = true; + return 0; } EXPORT_SYMBOL_GPL(dm_bitset_clear_bit); diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h index e1b9bea..c2287d6 100644 --- a/drivers/md/persistent-data/dm-bitset.h +++ b/drivers/md/persistent-data/dm-bitset.h @@ -71,6 +71,7 @@ struct dm_disk_bitset { uint64_t current_bits; bool current_index_set:1; + bool dirty:1; }; /* -- cgit v0.10.2 From d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 20 Mar 2014 10:11:15 -0400 Subject: dm cache: prevent corruption caused by discard_block_size > cache_block_size If the discard block size is larger than the cache block size we will not properly quiesce IO to a region that is about to be discarded. This results in a race between a cache migration where no copy is needed, and a write to an adjacent cache block that's within the same large discard block. Workaround this by limiting the discard_block_size to cache_block_size. Also limit the max_discard_sectors to cache_block_size. A more comprehensive fix that introduces range locking support in the bio_prison and proper quiescing of a discard range that spans multiple cache blocks is already in development. Reported-by: Morgan Mears Signed-off-by: Mike Snitzer Acked-by: Joe Thornber Acked-by: Heinz Mauelshagen Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 074b9c8..bccb7ae 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -239,7 +239,7 @@ struct cache { */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; - uint32_t discard_block_size; /* a power of 2 times sectors per block */ + uint32_t discard_block_size; /* * Rather than reconstructing the table line for the status we just @@ -2171,35 +2171,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, return 0; } -/* - * We want the discard block size to be a power of two, at least the size - * of the cache block size, and have no more than 2^14 discard blocks - * across the origin. 
- */ -#define MAX_DISCARD_BLOCKS (1 << 14) - -static bool too_many_discard_blocks(sector_t discard_block_size, - sector_t origin_size) -{ - (void) sector_div(origin_size, discard_block_size); - - return origin_size > MAX_DISCARD_BLOCKS; -} - -static sector_t calculate_discard_block_size(sector_t cache_block_size, - sector_t origin_size) -{ - sector_t discard_block_size; - - discard_block_size = roundup_pow_of_two(cache_block_size); - - if (origin_size) - while (too_many_discard_blocks(discard_block_size, origin_size)) - discard_block_size *= 2; - - return discard_block_size; -} - #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -2321,9 +2292,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) } clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); - cache->discard_block_size = - calculate_discard_block_size(cache->sectors_per_block, - cache->origin_sectors); + cache->discard_block_size = cache->sectors_per_block; cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); if (!cache->discard_bitset) { @@ -3120,7 +3089,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) /* * FIXME: these limits may be incompatible with the cache device */ - limits->max_discard_sectors = cache->discard_block_size * 1024; + limits->max_discard_sectors = cache->discard_block_size; limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; } -- cgit v0.10.2 From 64ab346a360a4b15c28fb8531918d4a01f4eabd9 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 27 Mar 2014 15:14:10 -0400 Subject: dm cache: remove remainder of distinct discard block size Discard block size not being equal to cache block size causes data corruption by erroneously avoiding migrations in issue_copy() because the discard state is being cleared for a group of cache blocks when it should not. Completely remove all code that enabled a distinction between the cache block size and discard block size. 
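With the two sizes forced equal, translating a discard bio into bitset indices is plain per-block rounding. A rough sketch of that arithmetic (hypothetical names only; the target itself uses dm_sector_div_up() and block_div() for the same thing): round the start of the range up and the end down, so blocks only partially covered by the discard are left alone.

  #include <stdint.h>

  typedef uint64_t sector_t;

  /*
   * Illustrative only: map a discarded sector range onto block indices
   * when the discard granularity equals the cache block size.
   */
  static void discard_to_blocks(sector_t start_sector, sector_t end_sector,
                                uint32_t sectors_per_block,
                                uint64_t *first_block, uint64_t *end_block)
  {
          *first_block = (start_sector + sectors_per_block - 1) / sectors_per_block;
          *end_block = end_sector / sectors_per_block;   /* exclusive */
  }

Each resulting index maps to exactly one bit in the discard bitset, which is what lets set_discard()/clear_discard() take a dm_oblock_t directly in the patch below.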
Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h index bed4ad4..aac0e2d 100644 --- a/drivers/md/dm-cache-block-types.h +++ b/drivers/md/dm-cache-block-types.h @@ -19,7 +19,6 @@ typedef dm_block_t __bitwise__ dm_oblock_t; typedef uint32_t __bitwise__ dm_cblock_t; -typedef dm_block_t __bitwise__ dm_dblock_t; static inline dm_oblock_t to_oblock(dm_block_t b) { @@ -41,14 +40,4 @@ static inline uint32_t from_cblock(dm_cblock_t b) return (__force uint32_t) b; } -static inline dm_dblock_t to_dblock(dm_block_t b) -{ - return (__force dm_dblock_t) b; -} - -static inline dm_block_t from_dblock(dm_dblock_t b) -{ - return (__force dm_block_t) b; -} - #endif /* DM_CACHE_BLOCK_TYPES_H */ diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9ef0752..cbd568a 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -109,7 +109,7 @@ struct dm_cache_metadata { dm_block_t discard_root; sector_t discard_block_size; - dm_dblock_t discard_nr_blocks; + dm_oblock_t discard_nr_blocks; sector_t data_block_size; dm_cblock_t cache_blocks; @@ -302,7 +302,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) disk_super->hint_root = cpu_to_le64(cmd->hint_root); disk_super->discard_root = cpu_to_le64(cmd->discard_root); disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); - disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); + disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); disk_super->cache_blocks = cpu_to_le32(0); @@ -496,7 +496,7 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd, cmd->hint_root = le64_to_cpu(disk_super->hint_root); cmd->discard_root = le64_to_cpu(disk_super->discard_root); cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size); - cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks)); + cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks)); cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); @@ -594,7 +594,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super->hint_root = cpu_to_le64(cmd->hint_root); disk_super->discard_root = cpu_to_le64(cmd->discard_root); disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); - disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); + disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); @@ -771,15 +771,15 @@ out: int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, sector_t discard_block_size, - dm_dblock_t new_nr_entries) + dm_oblock_t new_nr_entries) { int r; down_write(&cmd->root_lock); r = dm_bitset_resize(&cmd->discard_info, cmd->discard_root, - from_dblock(cmd->discard_nr_blocks), - from_dblock(new_nr_entries), + from_oblock(cmd->discard_nr_blocks), + from_oblock(new_nr_entries), false, 
&cmd->discard_root); if (!r) { cmd->discard_block_size = discard_block_size; @@ -792,28 +792,28 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, return r; } -static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) +static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) { return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root, - from_dblock(b), &cmd->discard_root); + from_oblock(b), &cmd->discard_root); } -static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) +static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) { return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root, - from_dblock(b), &cmd->discard_root); + from_oblock(b), &cmd->discard_root); } -static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b, +static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b, bool *is_discarded) { return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root, - from_dblock(b), &cmd->discard_root, + from_oblock(b), &cmd->discard_root, is_discarded); } static int __discard(struct dm_cache_metadata *cmd, - dm_dblock_t dblock, bool discard) + dm_oblock_t dblock, bool discard) { int r; @@ -826,7 +826,7 @@ static int __discard(struct dm_cache_metadata *cmd, } int dm_cache_set_discard(struct dm_cache_metadata *cmd, - dm_dblock_t dblock, bool discard) + dm_oblock_t dblock, bool discard) { int r; @@ -844,8 +844,8 @@ static int __load_discards(struct dm_cache_metadata *cmd, dm_block_t b; bool discard; - for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { - dm_dblock_t dblock = to_dblock(b); + for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) { + dm_oblock_t dblock = to_oblock(b); if (cmd->clean_when_opened) { r = __is_discarded(cmd, dblock, &discard); diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index cd906f1..ce8468b 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -72,14 +72,14 @@ dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd); int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, sector_t discard_block_size, - dm_dblock_t new_nr_entries); + dm_oblock_t new_nr_entries); typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, - dm_dblock_t dblock, bool discarded); + dm_oblock_t dblock, bool discarded); int dm_cache_load_discards(struct dm_cache_metadata *cmd, load_discard_fn fn, void *context); -int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard); +int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard); int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock); int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index bccb7ae..8534679 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -237,9 +237,8 @@ struct cache { /* * origin_blocks entries, discarded if set. 
*/ - dm_dblock_t discard_nr_blocks; + dm_oblock_t discard_nr_blocks; unsigned long *discard_bitset; - uint32_t discard_block_size; /* * Rather than reconstructing the table line for the status we just @@ -526,48 +525,33 @@ static dm_block_t block_div(dm_block_t b, uint32_t n) return b; } -static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) -{ - uint32_t discard_blocks = cache->discard_block_size; - dm_block_t b = from_oblock(oblock); - - if (!block_size_is_power_of_two(cache)) - discard_blocks = discard_blocks / cache->sectors_per_block; - else - discard_blocks >>= cache->sectors_per_block_shift; - - b = block_div(b, discard_blocks); - - return to_dblock(b); -} - -static void set_discard(struct cache *cache, dm_dblock_t b) +static void set_discard(struct cache *cache, dm_oblock_t b) { unsigned long flags; atomic_inc(&cache->stats.discard_count); spin_lock_irqsave(&cache->lock, flags); - set_bit(from_dblock(b), cache->discard_bitset); + set_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } -static void clear_discard(struct cache *cache, dm_dblock_t b) +static void clear_discard(struct cache *cache, dm_oblock_t b) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); - clear_bit(from_dblock(b), cache->discard_bitset); + clear_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } -static bool is_discarded(struct cache *cache, dm_dblock_t b) +static bool is_discarded(struct cache *cache, dm_oblock_t b) { int r; unsigned long flags; spin_lock_irqsave(&cache->lock, flags); - r = test_bit(from_dblock(b), cache->discard_bitset); + r = test_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; @@ -579,8 +563,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) unsigned long flags; spin_lock_irqsave(&cache->lock, flags); - r = test_bit(from_dblock(oblock_to_dblock(cache, b)), - cache->discard_bitset); + r = test_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; @@ -705,7 +688,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, check_if_tick_bio_needed(cache, bio); remap_to_origin(cache, bio); if (bio_data_dir(bio) == WRITE) - clear_discard(cache, oblock_to_dblock(cache, oblock)); + clear_discard(cache, oblock); } static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, @@ -715,7 +698,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, remap_to_cache(cache, bio, cblock); if (bio_data_dir(bio) == WRITE) { set_dirty(cache, oblock, cblock); - clear_discard(cache, oblock_to_dblock(cache, oblock)); + clear_discard(cache, oblock); } } @@ -1288,14 +1271,14 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) static void process_discard_bio(struct cache *cache, struct bio *bio) { dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, - cache->discard_block_size); + cache->sectors_per_block); dm_block_t end_block = bio_end_sector(bio); dm_block_t b; - end_block = block_div(end_block, cache->discard_block_size); + end_block = block_div(end_block, cache->sectors_per_block); for (b = start_block; b < end_block; b++) - set_discard(cache, to_dblock(b)); + set_discard(cache, to_oblock(b)); bio_endio(bio, 0); } @@ -2292,14 +2275,13 @@ static int cache_create(struct cache_args *ca, struct cache **result) } clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); - 
cache->discard_block_size = cache->sectors_per_block; - cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); - cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); + cache->discard_nr_blocks = cache->origin_blocks; + cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks)); if (!cache->discard_bitset) { *error = "could not allocate discard bitset"; goto bad; } - clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); + clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks)); cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(cache->copier)) { @@ -2583,16 +2565,16 @@ static int write_discard_bitset(struct cache *cache) { unsigned i, r; - r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, - cache->discard_nr_blocks); + r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block, + cache->origin_blocks); if (r) { DMERR("could not resize on-disk discard bitset"); return r; } - for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { - r = dm_cache_set_discard(cache->cmd, to_dblock(i), - is_discarded(cache, to_dblock(i))); + for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) { + r = dm_cache_set_discard(cache->cmd, to_oblock(i), + is_discarded(cache, to_oblock(i))); if (r) return r; } @@ -2689,16 +2671,14 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, } static int load_discard(void *context, sector_t discard_block_size, - dm_dblock_t dblock, bool discard) + dm_oblock_t oblock, bool discard) { struct cache *cache = context; - /* FIXME: handle mis-matched block size */ - if (discard) - set_discard(cache, dblock); + set_discard(cache, oblock); else - clear_discard(cache, dblock); + clear_discard(cache, oblock); return 0; } @@ -3089,8 +3069,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) /* * FIXME: these limits may be incompatible with the cache device */ - limits->max_discard_sectors = cache->discard_block_size; - limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; + limits->max_discard_sectors = cache->sectors_per_block; + limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT; } static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) -- cgit v0.10.2 From a9d45396f5956d0b615c7ae3b936afd888351a47 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Thu, 27 Mar 2014 14:13:20 +0000 Subject: dm transaction manager: fix corruption due to non-atomic transaction commit The persistent-data library used by dm-thin, dm-cache, etc is transactional. If anything goes wrong, such as an io error when writing new metadata or a power failure, then we roll back to the last transaction. Atomicity when committing a transaction is achieved by: a) Never overwriting data from the previous transaction. b) Writing the superblock last, after all other metadata has hit the disk. This commit and the following commit ("dm: take care to copy the space map roots before locking the superblock") fix a bug associated with (b). When committing it was possible for the superblock to still be written in spite of an io error occurring during the preceeding metadata flush. With these commits we're careful not to take the write lock out on the superblock until after the metadata flush has completed. 
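As a rough illustration of that ordering (this sketch is not part of the patch; 'struct my_md', my_superblock_write_lock() and my_fill_superblock() are hypothetical stand-ins for a client's own metadata type and helpers), a metadata client's commit path ends up looking like:

  static int example_commit(struct dm_transaction_manager *tm, struct my_md *md)
  {
          struct dm_block *sblock;
          int r;

          /* 1) flush every block except the superblock */
          r = dm_tm_pre_commit(tm);
          if (r)
                  return r;

          /* 2) only now take the write lock on the superblock; nothing
           *    that can fail should happen while it is held */
          r = my_superblock_write_lock(md, &sblock);
          if (r)
                  return r;
          my_fill_superblock(md, dm_block_data(sblock));

          /* 3) dm_tm_commit() unlocks sblock and issues the final flush */
          return dm_tm_commit(tm, sblock);
  }
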
Change the transaction manager's semantics for dm_tm_commit() to assume all data has been flushed _before_ the single superblock that is passed in. As a prerequisite, split the block manager's block unlocking and flushing by simplifying dm_bm_flush_and_unlock() to dm_bm_flush(). Now the unlocking must be done separately. This issue was discovered by forcing io errors at the crucial time using dm-flakey. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index cbd568a..4b73abe 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -530,8 +530,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd, disk_super = dm_block_data(sblock); update_flags(disk_super, mutator); read_superblock_fields(cmd, disk_super); + dm_bm_unlock(sblock); - return dm_bm_flush_and_unlock(cmd->bm, sblock); + return dm_bm_flush(cmd->bm); } static int __begin_transaction(struct dm_cache_metadata *cmd) diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 455f792..087411c 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b) } EXPORT_SYMBOL_GPL(dm_bm_unlock); -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, - struct dm_block *superblock) +int dm_bm_flush(struct dm_block_manager *bm) { - int r; - if (bm->read_only) return -EPERM; - r = dm_bufio_write_dirty_buffers(bm->bufio); - if (unlikely(r)) { - dm_bm_unlock(superblock); - return r; - } - - dm_bm_unlock(superblock); - return dm_bufio_write_dirty_buffers(bm->bufio); } -EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock); +EXPORT_SYMBOL_GPL(dm_bm_flush); void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) { diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 13cd58e..1b95dfc 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b); * * This method always blocks. */ -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, - struct dm_block *superblock); +int dm_bm_flush(struct dm_block_manager *bm); /* * Request data is prefetched into the cache. diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 81da1a2..3bc30a0 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm) if (r < 0) return r; - return 0; + return dm_bm_flush(tm->bm); } EXPORT_SYMBOL_GPL(dm_tm_pre_commit); @@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root) return -EWOULDBLOCK; wipe_shadow_table(tm); + dm_bm_unlock(root); - return dm_bm_flush_and_unlock(tm->bm, root); + return dm_bm_flush(tm->bm); } EXPORT_SYMBOL_GPL(dm_tm_commit); diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h index b5b1390..2772ed2 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.h +++ b/drivers/md/persistent-data/dm-transaction-manager.h @@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac /* * We use a 2-phase commit here. 
* - * i) In the first phase the block manager is told to start flushing, and - * the changes to the space map are written to disk. You should interrogate - * your particular space map to get detail of its root node etc. to be - * included in your superblock. + * i) Make all changes for the transaction *except* for the superblock. + * Then call dm_tm_pre_commit() to flush them to disk. * - * ii) @root will be committed last. You shouldn't use more than the - * first 512 bytes of @root if you wish the transaction to survive a power - * failure. You *must* have a write lock held on @root for both stage (i) - * and (ii). The commit will drop the write lock. + * ii) Lock your superblock. Update. Then call dm_tm_commit() which will + * unlock the superblock and flush it. No other blocks should be updated + * during this period. Care should be taken to never unlock a partially + * updated superblock; perform any operations that could fail *before* you + * take the superblock lock. */ int dm_tm_pre_commit(struct dm_transaction_manager *tm); -int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root); +int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock); /* * These methods are the only way to get hold of a writeable block. -- cgit v0.10.2 From 5a32083d03fb543f63489b2946c4948398579ba0 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Thu, 27 Mar 2014 14:13:23 +0000 Subject: dm: take care to copy the space map roots before locking the superblock In theory copying the space map root can fail, but in practice it never does because we're careful to check what size buffer is needed. But make certain we're able to copy the space map roots before locking the superblock. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org # drop dm-era and dm-cache changes as needed diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 4b73abe..66f102a 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -120,6 +120,12 @@ struct dm_cache_metadata { unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; size_t policy_hint_size; struct dm_cache_statistics stats; + + /* + * Reading the space map root can fail, so we read it into this + * buffer before the superblock is locked and updated. 
+ */ + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; /*------------------------------------------------------------------- @@ -260,11 +266,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd) } } +static int __save_sm_root(struct dm_cache_metadata *cmd) +{ + int r; + size_t metadata_len; + + r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); + if (r < 0) + return r; + + return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root, + metadata_len); +} + +static void __copy_sm_root(struct dm_cache_metadata *cmd, + struct cache_disk_superblock *disk_super) +{ + memcpy(&disk_super->metadata_space_map_root, + &cmd->metadata_space_map_root, + sizeof(cmd->metadata_space_map_root)); +} + static int __write_initial_superblock(struct dm_cache_metadata *cmd) { int r; struct dm_block *sblock; - size_t metadata_len; struct cache_disk_superblock *disk_super; sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; @@ -272,12 +298,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS) bdev_size = DM_CACHE_METADATA_MAX_SECTORS; - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); + r = dm_tm_pre_commit(cmd->tm); if (r < 0) return r; - r = dm_tm_pre_commit(cmd->tm); - if (r < 0) + /* + * dm_sm_copy_root() can fail. So we need to do it before we start + * updating the superblock. + */ + r = __save_sm_root(cmd); + if (r) return r; r = superblock_lock_zero(cmd, &sblock); @@ -293,10 +323,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); disk_super->policy_hint_size = 0; - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto bad_locked; + __copy_sm_root(cmd, disk_super); disk_super->mapping_root = cpu_to_le64(cmd->root); disk_super->hint_root = cpu_to_le64(cmd->hint_root); @@ -313,10 +340,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) disk_super->write_misses = cpu_to_le32(0); return dm_tm_commit(cmd->tm, sblock); - -bad_locked: - dm_bm_unlock(sblock); - return r; } static int __format_metadata(struct dm_cache_metadata *cmd) @@ -560,7 +583,6 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, flags_mutator mutator) { int r; - size_t metadata_len; struct cache_disk_superblock *disk_super; struct dm_block *sblock; @@ -578,8 +600,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, if (r < 0) return r; - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); - if (r < 0) + r = __save_sm_root(cmd); + if (r) return r; r = superblock_lock(cmd, &sblock); @@ -606,13 +628,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits); disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses); - - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) { - dm_bm_unlock(sblock); - return r; - } + __copy_sm_root(cmd, disk_super); return dm_tm_commit(cmd->tm, sblock); } diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 03d9560..414dad4 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -289,6 +289,12 @@ struct era_metadata { * A flag that is set whenever a writeset has been archived. 
*/ bool archived_writesets; + + /* + * Reading the space map root can fail, so we read it into this + * buffer before the superblock is locked and updated. + */ + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; static int superblock_read_lock(struct era_metadata *md, @@ -453,16 +459,33 @@ bad: return r; } +static int save_sm_root(struct era_metadata *md) +{ + int r; + size_t metadata_len; + + r = dm_sm_root_size(md->sm, &metadata_len); + if (r < 0) + return r; + + return dm_sm_copy_root(md->sm, &md->metadata_space_map_root, + metadata_len); +} + +static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk) +{ + memcpy(&disk->metadata_space_map_root, + &md->metadata_space_map_root, + sizeof(md->metadata_space_map_root)); +} + /* * Writes a superblock, including the static fields that don't get updated * with every commit (possible optimisation here). 'md' should be fully * constructed when this is called. */ -static int prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) +static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) { - int r; - size_t metadata_len; - disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC); disk->flags = cpu_to_le32(0ul); @@ -470,14 +493,7 @@ static int prepare_superblock(struct era_metadata *md, struct superblock_disk *d memset(disk->uuid, 0, sizeof(disk->uuid)); disk->version = cpu_to_le32(MAX_ERA_VERSION); - r = dm_sm_root_size(md->sm, &metadata_len); - if (r < 0) - return r; - - r = dm_sm_copy_root(md->sm, &disk->metadata_space_map_root, - metadata_len); - if (r < 0) - return r; + copy_sm_root(md, disk); disk->data_block_size = cpu_to_le32(md->block_size); disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); @@ -488,8 +504,6 @@ static int prepare_superblock(struct era_metadata *md, struct superblock_disk *d disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root); disk->era_array_root = cpu_to_le64(md->era_array_root); disk->metadata_snap = cpu_to_le64(md->metadata_snap); - - return 0; } static int write_superblock(struct era_metadata *md) @@ -498,17 +512,18 @@ static int write_superblock(struct era_metadata *md) struct dm_block *sblock; struct superblock_disk *disk; + r = save_sm_root(md); + if (r) { + DMERR("%s: save_sm_root failed", __func__); + return r; + } + r = superblock_lock_zero(md, &sblock); if (r) return r; disk = dm_block_data(sblock); - r = prepare_superblock(md, disk); - if (r) { - DMERR("%s: prepare_superblock failed", __func__); - dm_bm_unlock(sblock); /* FIXME: does this commit? */ - return r; - } + prepare_superblock(md, disk); return dm_tm_commit(md->tm, sblock); } @@ -942,6 +957,12 @@ static int metadata_commit(struct era_metadata *md) } } + r = save_sm_root(md); + if (r) { + DMERR("%s: save_sm_root failed", __func__); + return r; + } + r = dm_tm_pre_commit(md->tm); if (r) { DMERR("%s: pre commit failed", __func__); @@ -954,12 +975,7 @@ static int metadata_commit(struct era_metadata *md) return r; } - r = prepare_superblock(md, dm_block_data(sblock)); - if (r) { - DMERR("%s: prepare_superblock failed", __func__); - dm_bm_unlock(sblock); /* FIXME: does this commit? 
*/ - return r; - } + prepare_superblock(md, dm_block_data(sblock)); return dm_tm_commit(md->tm, sblock); } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index fb9efc8..b086a94 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -192,6 +192,13 @@ struct dm_pool_metadata { * operation possible in this state is the closing of the device. */ bool fail_io:1; + + /* + * Reading the space map roots can fail, so we read it into these + * buffers before the superblock is locked and updated. + */ + __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE]; + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; struct dm_thin_device { @@ -431,26 +438,53 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) pmd->details_info.value_type.equal = NULL; } +static int save_sm_roots(struct dm_pool_metadata *pmd) +{ + int r; + size_t len; + + r = dm_sm_root_size(pmd->metadata_sm, &len); + if (r < 0) + return r; + + r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); + if (r < 0) + return r; + + r = dm_sm_root_size(pmd->data_sm, &len); + if (r < 0) + return r; + + return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); +} + +static void copy_sm_roots(struct dm_pool_metadata *pmd, + struct thin_disk_superblock *disk) +{ + memcpy(&disk->metadata_space_map_root, + &pmd->metadata_space_map_root, + sizeof(pmd->metadata_space_map_root)); + + memcpy(&disk->data_space_map_root, + &pmd->data_space_map_root, + sizeof(pmd->data_space_map_root)); +} + static int __write_initial_superblock(struct dm_pool_metadata *pmd) { int r; struct dm_block *sblock; - size_t metadata_len, data_len; struct thin_disk_superblock *disk_super; sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; if (bdev_size > THIN_METADATA_MAX_SECTORS) bdev_size = THIN_METADATA_MAX_SECTORS; - r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); - if (r < 0) - return r; - - r = dm_sm_root_size(pmd->data_sm, &data_len); + r = dm_sm_commit(pmd->data_sm); if (r < 0) return r; - r = dm_sm_commit(pmd->data_sm); + r = save_sm_roots(pmd); if (r < 0) return r; @@ -471,15 +505,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) disk_super->trans_id = 0; disk_super->held_root = 0; - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto bad_locked; - - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, - data_len); - if (r < 0) - goto bad_locked; + copy_sm_roots(pmd, disk_super); disk_super->data_mapping_root = cpu_to_le64(pmd->root); disk_super->device_details_root = cpu_to_le64(pmd->details_root); @@ -488,10 +514,6 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); return dm_tm_commit(pmd->tm, sblock); - -bad_locked: - dm_bm_unlock(sblock); - return r; } static int __format_metadata(struct dm_pool_metadata *pmd) @@ -769,6 +791,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) if (r < 0) return r; + r = save_sm_roots(pmd); + if (r < 0) + return r; + r = superblock_lock(pmd, &sblock); if (r) return r; @@ -780,21 +806,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) disk_super->trans_id = cpu_to_le64(pmd->trans_id); disk_super->flags = cpu_to_le32(pmd->flags); - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto out_locked; - - r = dm_sm_copy_root(pmd->data_sm, 
&disk_super->data_space_map_root, - data_len); - if (r < 0) - goto out_locked; + copy_sm_roots(pmd, disk_super); return dm_tm_commit(pmd->tm, sblock); - -out_locked: - dm_bm_unlock(sblock); - return r; } struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, -- cgit v0.10.2 From 473c36dfeecf4e49db928f3284b2fbe981f8c284 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 13 Feb 2014 13:43:32 -0500 Subject: dm: make dm_table_alloc_md_mempools static Make the function dm_table_alloc_md_mempools static because it is not called from another file. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 6a7f2b8..2ae35b2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -945,7 +945,7 @@ bool dm_table_request_based(struct dm_table *t) return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; } -int dm_table_alloc_md_mempools(struct dm_table *t) +static int dm_table_alloc_md_mempools(struct dm_table *t) { unsigned type = dm_table_get_type(t); unsigned per_bio_data_size = 0; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index c4569f0..88cc58c 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -73,7 +73,6 @@ unsigned dm_table_get_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); bool dm_table_supports_discards(struct dm_table *t); -int dm_table_alloc_md_mempools(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); -- cgit v0.10.2 From d70ab4fb723cf7acfa656cb2ad1e75be7ed94bef Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 3 Mar 2014 17:43:31 -0500 Subject: dm: remove dm_get_mapinfo Remove dm_get_mapinfo() because no target uses it. Targets can allocate per-bio data using ti->per_bio_data_size, this is much more flexible than union map_info. Leave union map_info only for the request-based multipath target's use. Also delete the unused "unsigned long long ll" field of union map_info. 
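For reference, a minimal sketch (not taken from this patch) of how a bio-based target carries per-bio state with this interface; 'struct my_per_bio' and the my_* callbacks are hypothetical, and a real constructor would also parse its arguments and open its underlying device with dm_get_device():

  struct my_per_bio {
          sector_t saved_sector;
  };

  static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
  {
          /* dm core reserves this much space alongside every cloned bio */
          ti->per_bio_data_size = sizeof(struct my_per_bio);
          return 0;
  }

  static int my_map(struct dm_target *ti, struct bio *bio)
  {
          struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(struct my_per_bio));

          pb->saved_sector = bio->bi_iter.bi_sector;
          /* ...remap the bio to the underlying device here... */
          return DM_MAPIO_REMAPPED;
  }
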
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8c53b09..ef5750d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -94,13 +94,6 @@ struct dm_rq_clone_bio_info { struct bio clone; }; -union map_info *dm_get_mapinfo(struct bio *bio) -{ - if (bio && bio->bi_private) - return &((struct dm_target_io *)bio->bi_private)->info; - return NULL; -} - union map_info *dm_get_rq_mapinfo(struct request *rq) { if (rq && rq->end_io_data) @@ -1195,7 +1188,6 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, tio->io = ci->io; tio->ti = ti; - memset(&tio->info, 0, sizeof(tio->info)); tio->target_bio_nr = target_bio_nr; return tio; @@ -2873,8 +2865,6 @@ static const struct block_device_operations dm_blk_dops = { .owner = THIS_MODULE }; -EXPORT_SYMBOL(dm_get_mapinfo); - /* * module hooks */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ed419c6..5eeeab4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -23,7 +23,6 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { void *ptr; - unsigned long long ll; }; /* @@ -291,7 +290,6 @@ struct dm_target_callbacks { struct dm_target_io { struct dm_io *io; struct dm_target *ti; - union map_info info; unsigned target_bio_nr; struct bio clone; }; @@ -403,7 +401,6 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); -union map_info *dm_get_mapinfo(struct bio *bio); union map_info *dm_get_rq_mapinfo(struct request *rq); struct queue_limits *dm_get_queue_limits(struct mapped_device *md); -- cgit v0.10.2 From bfc6d41cee53b2d02edc469fa459000a448a90ab Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 4 Mar 2014 18:24:49 -0500 Subject: dm: stop using bi_private Device mapper uses the bio structure's bi_private field as a pointer to dm_target_io or dm_rq_clone_bio_info. But a bio structure is embedded in the dm_target_io and dm_rq_clone_bio_info structures, so the pointer to the structure that contains the bio can be found with the container_of() macro. Remove the use of bi_private and use container_of() instead. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ef5750d..0d52f6f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -753,7 +753,7 @@ static void dec_pending(struct dm_io *io, int error) static void clone_endio(struct bio *bio, int error) { int r = 0; - struct dm_target_io *tio = bio->bi_private; + struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; @@ -787,7 +787,8 @@ static void clone_endio(struct bio *bio, int error) */ static void end_clone_bio(struct bio *clone, int error) { - struct dm_rq_clone_bio_info *info = clone->bi_private; + struct dm_rq_clone_bio_info *info = + container_of(clone, struct dm_rq_clone_bio_info, clone); struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; @@ -1113,7 +1114,6 @@ static void __map_bio(struct dm_target_io *tio) struct dm_target *ti = tio->ti; clone->bi_end_io = clone_endio; - clone->bi_private = tio; /* * Map the clone. 
If r == 0 we don't need to do @@ -1522,7 +1522,6 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, info->orig = bio_orig; info->tio = tio; bio->bi_end_io = end_clone_bio; - bio->bi_private = info; return 0; } -- cgit v0.10.2 From 9cdb8520049629271ad411ac91ab1bea3e1cfa2b Mon Sep 17 00:00:00 2001 From: Monam Agarwal Date: Sun, 23 Mar 2014 23:58:27 +0530 Subject: dm: use RCU_INIT_POINTER instead of rcu_assign_pointer in __unbind Replace rcu_assign_pointer(p, NULL) with RCU_INIT_POINTER(p, NULL). The rcu_assign_pointer() ensures that the initialization of a structure is carried out before storing a pointer to that structure. And in the case of the NULL pointer, there is no structure to initialize. So, rcu_assign_pointer(p, NULL) can be safely converted to RCU_INIT_POINTER(p, NULL). Signed-off-by: Monam Agarwal Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0d52f6f..6382213 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2163,7 +2163,7 @@ static struct dm_table *__unbind(struct mapped_device *md) return NULL; dm_table_event_callback(map, NULL, NULL); - rcu_assign_pointer(md->map, NULL); + RCU_INIT_POINTER(md->map, NULL); dm_sync_table(md); return map; -- cgit v0.10.2 From 17f4ff45b58742e2cb32fce6e406dbdb4b32a1e7 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 28 Feb 2014 15:33:42 +0100 Subject: dm mpath: do not call pg_init when it is already running This patch moves condition checks as a preparation of following patches and has no effect on behaviour. process_queued_ios() is the only caller of __pg_init_all_paths() and 2 condition checks are moved from outside to inside without side effects. Signed-off-by: Hannes Reinecke Signed-off-by: Mike Snitzer Reviewed-by: Jun'ichi Nomura diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 422a9fd..c78e6a9 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -261,6 +261,9 @@ static void __pg_init_all_paths(struct multipath *m) struct pgpath *pgpath; unsigned long pg_init_delay = 0; + if (m->pg_init_in_progress || m->pg_init_disabled) + return; + m->pg_init_count++; m->pg_init_required = 0; if (m->pg_init_delay_retry) @@ -501,8 +504,7 @@ static void process_queued_ios(struct work_struct *work) (!pgpath && !m->queue_if_no_path)) must_queue = 0; - if (m->pg_init_required && !m->pg_init_in_progress && pgpath && - !m->pg_init_disabled) + if (pgpath && m->pg_init_required) __pg_init_all_paths(m); spin_unlock_irqrestore(&m->lock, flags); -- cgit v0.10.2 From 9974fa2c6a7d470ca3c201fe7dbac64bf4dd8d2a Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 28 Feb 2014 15:33:43 +0100 Subject: dm table: add dm_table_run_md_queue_async Introduce dm_table_run_md_queue_async() to run the request_queue of the mapped_device associated with a request-based DM table. Also add dm_md_get_queue() wrapper to extract the request_queue from a mapped_device. 
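A hypothetical usage sketch (not part of this patch; 'struct my_target' and its fields are placeholders) of a request-based target clearing a condition that previously forced requeueing and then kicking its queue so the held-back requests are remapped promptly:

  static void my_clear_queueing(struct my_target *t)
  {
          unsigned long flags;

          spin_lock_irqsave(&t->lock, flags);
          t->queue_io = false;                    /* state change under the target's lock */
          spin_unlock_irqrestore(&t->lock, flags);

          /* re-run the mapped device's request queue outside the lock */
          dm_table_run_md_queue_async(t->ti->table);
  }

This is the pattern the multipath changes later in this series adopt in place of queue_work(kmultipathd, &m->process_queued_ios).
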
Signed-off-by: Mike Snitzer Signed-off-by: Hannes Reinecke Reviewed-by: Jun'ichi Nomura diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2ae35b2..50601ec 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) } EXPORT_SYMBOL(dm_table_get_md); +void dm_table_run_md_queue_async(struct dm_table *t) +{ + struct mapped_device *md; + struct request_queue *queue; + unsigned long flags; + + if (!dm_table_request_based(t)) + return; + + md = dm_table_get_md(t); + queue = dm_get_md_queue(md); + if (queue) { + spin_lock_irqsave(queue->queue_lock, flags); + blk_run_queue_async(queue); + spin_unlock_irqrestore(queue->queue_lock, flags); + } +} +EXPORT_SYMBOL(dm_table_run_md_queue_async); + static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6382213..455e649 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -468,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md) return get_capacity(md->disk); } +struct request_queue *dm_get_md_queue(struct mapped_device *md) +{ + return md->queue; +} + struct dm_stats *dm_get_stats(struct mapped_device *md) { return &md->stats; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 88cc58c..ed76126 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -188,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only int dm_cancel_deferred_remove(struct mapped_device *md); int dm_request_based(struct mapped_device *md); sector_t dm_get_size(struct mapped_device *md); +struct request_queue *dm_get_md_queue(struct mapped_device *md); struct dm_stats *dm_get_stats(struct mapped_device *md); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 5eeeab4..63da56e 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -463,6 +463,11 @@ struct mapped_device *dm_table_get_md(struct dm_table *t); void dm_table_event(struct dm_table *t); /* + * Run the queue for request-based targets. + */ +void dm_table_run_md_queue_async(struct dm_table *t); + +/* * The device must be suspended before calling this method. * Returns the previous table, which the caller must destroy. */ -- cgit v0.10.2 From e809917735ebf1b9a56c24e877ce0d320baee2ec Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 28 Feb 2014 15:33:44 +0100 Subject: dm mpath: push back requests instead of queueing There is no reason why multipath needs to queue requests internally for queue_if_no_path or pg_init; we should rather push them back onto the request queue. And while we're at it we can simplify the conditional statement in map_io() to make it easier to read. Since mpath no longer does internal queuing of I/O the table info no longer emits the internal queue_size. Instead it displays 1 if queuing is being used or 0 if it is not. 
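With this change the mapping decision collapses to the following (a condensed paraphrase of the new map_io() in the diff below, with locking and mapinfo setup omitted, not literal code):

          if (!pgpath)
                  return __must_push_back(m) ? DM_MAPIO_REQUEUE : -EIO;

          if (!pg_ready(m)) {
                  __pg_init_all_paths(m);
                  return DM_MAPIO_REQUEUE;        /* dm core pushes the request back onto the queue */
          }

          /* remap the clone to pgpath->path.dev->bdev ... */
          return DM_MAPIO_REMAPPED;
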
Signed-off-by: Hannes Reinecke Signed-off-by: Mike Snitzer Reviewed-by: Jun'ichi Nomura diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c78e6a9..e1c3ed3 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -93,9 +93,7 @@ struct multipath { unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ - unsigned queue_size; struct work_struct process_queued_ios; - struct list_head queued_ios; struct work_struct trigger_event; @@ -124,6 +122,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); +static int __pgpath_busy(struct pgpath *pgpath); /*----------------------------------------------- @@ -195,7 +194,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti) m = kzalloc(sizeof(*m), GFP_KERNEL); if (m) { INIT_LIST_HEAD(&m->priority_groups); - INIT_LIST_HEAD(&m->queued_ios); spin_lock_init(&m->lock); m->queue_io = 1; m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; @@ -368,12 +366,15 @@ failed: */ static int __must_push_back(struct multipath *m) { - return (m->queue_if_no_path != m->saved_queue_if_no_path && - dm_noflush_suspending(m->ti)); + return (m->queue_if_no_path || + (m->queue_if_no_path != m->saved_queue_if_no_path && + dm_noflush_suspending(m->ti))); } +#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required) + static int map_io(struct multipath *m, struct request *clone, - union map_info *map_context, unsigned was_queued) + union map_info *map_context) { int r = DM_MAPIO_REMAPPED; size_t nr_bytes = blk_rq_bytes(clone); @@ -391,37 +392,28 @@ static int map_io(struct multipath *m, struct request *clone, pgpath = m->current_pgpath; - if (was_queued) - m->queue_size--; - - if (m->pg_init_required) { - if (!m->pg_init_in_progress) - queue_work(kmultipathd, &m->process_queued_ios); - r = DM_MAPIO_REQUEUE; - } else if ((pgpath && m->queue_io) || - (!pgpath && m->queue_if_no_path)) { - /* Queue for the daemon to resubmit */ - list_add_tail(&clone->queuelist, &m->queued_ios); - m->queue_size++; - if (!m->queue_io) - queue_work(kmultipathd, &m->process_queued_ios); - pgpath = NULL; - r = DM_MAPIO_SUBMITTED; - } else if (pgpath) { - bdev = pgpath->path.dev->bdev; - clone->q = bdev_get_queue(bdev); - clone->rq_disk = bdev->bd_disk; - } else if (__must_push_back(m)) - r = DM_MAPIO_REQUEUE; - else - r = -EIO; /* Failed */ - - mpio->pgpath = pgpath; - mpio->nr_bytes = nr_bytes; - - if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) - pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, - nr_bytes); + if (pgpath) { + if (pg_ready(m)) { + bdev = pgpath->path.dev->bdev; + clone->q = bdev_get_queue(bdev); + clone->rq_disk = bdev->bd_disk; + mpio->pgpath = pgpath; + mpio->nr_bytes = nr_bytes; + if (pgpath->pg->ps.type->start_io) + pgpath->pg->ps.type->start_io(&pgpath->pg->ps, + &pgpath->path, + nr_bytes); + } else { + __pg_init_all_paths(m); + r = DM_MAPIO_REQUEUE; + } + } else { + /* No path */ + if (__must_push_back(m)) + r = DM_MAPIO_REQUEUE; + else + r = -EIO; /* Failed */ + } spin_unlock_irqrestore(&m->lock, flags); @@ -443,7 +435,7 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, else m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; - if (!m->queue_if_no_path && m->queue_size) + if (!m->queue_if_no_path) 
queue_work(kmultipathd, &m->process_queued_ios); spin_unlock_irqrestore(&m->lock, flags); @@ -451,40 +443,6 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, return 0; } -/*----------------------------------------------------------------- - * The multipath daemon is responsible for resubmitting queued ios. - *---------------------------------------------------------------*/ - -static void dispatch_queued_ios(struct multipath *m) -{ - int r; - unsigned long flags; - union map_info *info; - struct request *clone, *n; - LIST_HEAD(cl); - - spin_lock_irqsave(&m->lock, flags); - list_splice_init(&m->queued_ios, &cl); - spin_unlock_irqrestore(&m->lock, flags); - - list_for_each_entry_safe(clone, n, &cl, queuelist) { - list_del_init(&clone->queuelist); - - info = dm_get_rq_mapinfo(clone); - - r = map_io(m, clone, info, 1); - if (r < 0) { - clear_mapinfo(m, info); - dm_kill_unmapped_request(clone, r); - } else if (r == DM_MAPIO_REMAPPED) - dm_dispatch_request(clone); - else if (r == DM_MAPIO_REQUEUE) { - clear_mapinfo(m, info); - dm_requeue_unmapped_request(clone); - } - } -} - static void process_queued_ios(struct work_struct *work) { struct multipath *m = @@ -509,7 +467,7 @@ static void process_queued_ios(struct work_struct *work) spin_unlock_irqrestore(&m->lock, flags); if (!must_queue) - dispatch_queued_ios(m); + dm_table_run_md_queue_async(m->ti->table); } /* @@ -987,7 +945,7 @@ static int multipath_map(struct dm_target *ti, struct request *clone, return DM_MAPIO_REQUEUE; clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - r = map_io(m, clone, map_context, 0); + r = map_io(m, clone, map_context); if (r < 0 || r == DM_MAPIO_REQUEUE) clear_mapinfo(m, map_context); @@ -1056,7 +1014,7 @@ static int reinstate_path(struct pgpath *pgpath) pgpath->is_active = 1; - if (!m->nr_valid_paths++ && m->queue_size) { + if (!m->nr_valid_paths++) { m->current_pgpath = NULL; queue_work(kmultipathd, &m->process_queued_ios); } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { @@ -1435,7 +1393,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type, /* Features */ if (type == STATUSTYPE_INFO) - DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); + DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count); else { DMEMIT("%u ", m->queue_if_no_path + (m->pg_init_retries > 0) * 2 + @@ -1686,7 +1644,7 @@ static int multipath_busy(struct dm_target *ti) spin_lock_irqsave(&m->lock, flags); /* pg_init in progress, requeue until done */ - if (m->pg_init_in_progress) { + if (!pg_ready(m)) { busy = 1; goto out; } @@ -1739,7 +1697,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, -- cgit v0.10.2 From 3e9f1be1b4079bfe689ef6be5174f3177b3fd2aa Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 28 Feb 2014 15:33:45 +0100 Subject: dm mpath: remove process_queued_ios() process_queued_ios() has served 3 functions: 1) select pg and pgpath if none is selected 2) start pg_init if requested 3) dispatch queued IOs when pg is ready Basically, a call to queue_work(process_queued_ios) can be replaced by dm_table_run_md_queue_async(), which runs request queue and ends up calling map_io(), which does 1), 2) and 3). Exception is when !pg_ready() (which means either pg_init is running or requested), then multipath_busy() prevents map_io() being called from request_fn. 
If pg_init is running, it should be ok as long as pg_init_done() does the right thing when pg_init is completed, I.e.: restart pg_init if !pg_ready() or call dm_table_run_md_queue_async() to kick map_io(). If pg_init is requested, we have to make sure the request is detected and pg_init will be started. pg_init is requested in 3 places: a) __choose_pgpath() in map_io() b) __choose_pgpath() in multipath_ioctl() c) pg_init retry in pg_init_done() a) is ok because map_io() calls __pg_init_all_paths(), which does 2). b) needs a call to __pg_init_all_paths(), which does 2). c) needs a call to __pg_init_all_paths(), which does 2). So this patch removes process_queued_ios() and ensures that __pg_init_all_paths() is called at the appropriate locations. Signed-off-by: Hannes Reinecke Signed-off-by: Mike Snitzer Reviewed-by: Jun'ichi Nomura diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e1c3ed3..1c6a3f8 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -93,8 +93,6 @@ struct multipath { unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ - struct work_struct process_queued_ios; - struct work_struct trigger_event; /* @@ -119,7 +117,6 @@ typedef int (*action_fn) (struct pgpath *pgpath); static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; -static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); static int __pgpath_busy(struct pgpath *pgpath); @@ -197,7 +194,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti) spin_lock_init(&m->lock); m->queue_io = 1; m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; - INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); @@ -254,16 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info) * Path selection *-----------------------------------------------*/ -static void __pg_init_all_paths(struct multipath *m) +static int __pg_init_all_paths(struct multipath *m) { struct pgpath *pgpath; unsigned long pg_init_delay = 0; if (m->pg_init_in_progress || m->pg_init_disabled) - return; + return 0; m->pg_init_count++; m->pg_init_required = 0; + + /* Check here to reset pg_init_required */ + if (!m->current_pg) + return 0; + if (m->pg_init_delay_retry) pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? 
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); @@ -275,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m) pg_init_delay)) m->pg_init_in_progress++; } + return m->pg_init_in_progress; } static void __switch_pg(struct multipath *m, struct pgpath *pgpath) @@ -436,40 +438,13 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; if (!m->queue_if_no_path) - queue_work(kmultipathd, &m->process_queued_ios); + dm_table_run_md_queue_async(m->ti->table); spin_unlock_irqrestore(&m->lock, flags); return 0; } -static void process_queued_ios(struct work_struct *work) -{ - struct multipath *m = - container_of(work, struct multipath, process_queued_ios); - struct pgpath *pgpath = NULL; - unsigned must_queue = 1; - unsigned long flags; - - spin_lock_irqsave(&m->lock, flags); - - if (!m->current_pgpath) - __choose_pgpath(m, 0); - - pgpath = m->current_pgpath; - - if ((pgpath && !m->queue_io) || - (!pgpath && !m->queue_if_no_path)) - must_queue = 0; - - if (pgpath && m->pg_init_required) - __pg_init_all_paths(m); - - spin_unlock_irqrestore(&m->lock, flags); - if (!must_queue) - dm_table_run_md_queue_async(m->ti->table); -} - /* * An event is triggered whenever a path is taken out of use. * Includes path failure and PG bypass. @@ -1016,7 +991,7 @@ static int reinstate_path(struct pgpath *pgpath) if (!m->nr_valid_paths++) { m->current_pgpath = NULL; - queue_work(kmultipathd, &m->process_queued_ios); + dm_table_run_md_queue_async(m->ti->table); } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) m->pg_init_in_progress++; @@ -1212,11 +1187,12 @@ static void pg_init_done(void *data, int errors) /* Activations of other paths are still on going */ goto out; - if (!m->pg_init_required) - m->queue_io = 0; - - m->pg_init_delay_retry = delay_retry; - queue_work(kmultipathd, &m->process_queued_ios); + if (m->pg_init_required) { + m->pg_init_delay_retry = delay_retry; + if (__pg_init_all_paths(m)) + goto out; + } + m->queue_io = 0; /* * Wake up any thread waiting to suspend. @@ -1592,8 +1568,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, r = err; } - if (r == -ENOTCONN && !fatal_signal_pending(current)) - queue_work(kmultipathd, &m->process_queued_ios); + if (r == -ENOTCONN && !fatal_signal_pending(current)) { + spin_lock_irqsave(&m->lock, flags); + if (!m->current_pg) { + /* Path status changed, redo selection */ + __choose_pgpath(m, 0); + } + if (m->pg_init_required) + __pg_init_all_paths(m); + spin_unlock_irqrestore(&m->lock, flags); + dm_table_run_md_queue_async(m->ti->table); + } return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); } -- cgit v0.10.2 From e3bde04f1ecef9d0508af9ea78421863744f552b Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 28 Feb 2014 15:33:46 +0100 Subject: dm mpath: reduce memory pressure when requeuing When multipath needs to requeue I/O in the block layer the per-request context shouldn't be allocated, as it will be freed immediately afterwards anyway. Avoiding this memory allocation will reduce memory pressure during requeuing. 
Signed-off-by: Hannes Reinecke Signed-off-by: Mike Snitzer Reviewed-by: Jun'ichi Nomura diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 1c6a3f8..2c2c33f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -378,12 +378,12 @@ static int __must_push_back(struct multipath *m) static int map_io(struct multipath *m, struct request *clone, union map_info *map_context) { - int r = DM_MAPIO_REMAPPED; + int r = DM_MAPIO_REQUEUE; size_t nr_bytes = blk_rq_bytes(clone); unsigned long flags; struct pgpath *pgpath; struct block_device *bdev; - struct dm_mpath_io *mpio = map_context->ptr; + struct dm_mpath_io *mpio; spin_lock_irqsave(&m->lock, flags); @@ -396,27 +396,29 @@ static int map_io(struct multipath *m, struct request *clone, if (pgpath) { if (pg_ready(m)) { + if (set_mapinfo(m, map_context) < 0) + /* ENOMEM, requeue */ + goto out_unlock; + bdev = pgpath->path.dev->bdev; clone->q = bdev_get_queue(bdev); clone->rq_disk = bdev->bd_disk; + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; + mpio = map_context->ptr; mpio->pgpath = pgpath; mpio->nr_bytes = nr_bytes; if (pgpath->pg->ps.type->start_io) pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, nr_bytes); - } else { - __pg_init_all_paths(m); - r = DM_MAPIO_REQUEUE; + r = DM_MAPIO_REMAPPED; + goto out_unlock; } - } else { - /* No path */ - if (__must_push_back(m)) - r = DM_MAPIO_REQUEUE; - else - r = -EIO; /* Failed */ - } + __pg_init_all_paths(m); + } else if (!__must_push_back(m)) + r = -EIO; /* Failed */ +out_unlock: spin_unlock_irqrestore(&m->lock, flags); return r; @@ -912,19 +914,9 @@ static void multipath_dtr(struct dm_target *ti) static int multipath_map(struct dm_target *ti, struct request *clone, union map_info *map_context) { - int r; struct multipath *m = (struct multipath *) ti->private; - if (set_mapinfo(m, map_context) < 0) - /* ENOMEM, requeue */ - return DM_MAPIO_REQUEUE; - - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - r = map_io(m, clone, map_context); - if (r < 0 || r == DM_MAPIO_REQUEUE) - clear_mapinfo(m, map_context); - - return r; + return map_io(m, clone, map_context); } /* -- cgit v0.10.2 From 36fcffcc6500228efdfaf3a36761dd57a38366e3 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 28 Feb 2014 15:33:47 +0100 Subject: dm mpath: remove map_io() multipath_map() is now just a wrapper around map_io(), so we can rename map_io() to multipath_map(). Signed-off-by: Hannes Reinecke Signed-off-by: Mike Snitzer Reviewed-by: Jun'ichi Nomura diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 2c2c33f..07ca77f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -375,9 +375,13 @@ static int __must_push_back(struct multipath *m) #define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required) -static int map_io(struct multipath *m, struct request *clone, - union map_info *map_context) +/* + * Map cloned requests + */ +static int multipath_map(struct dm_target *ti, struct request *clone, + union map_info *map_context) { + struct multipath *m = (struct multipath *) ti->private; int r = DM_MAPIO_REQUEUE; size_t nr_bytes = blk_rq_bytes(clone); unsigned long flags; @@ -909,17 +913,6 @@ static void multipath_dtr(struct dm_target *ti) } /* - * Map cloned requests - */ -static int multipath_map(struct dm_target *ti, struct request *clone, - union map_info *map_context) -{ - struct multipath *m = (struct multipath *) ti->private; - - return map_io(m, clone, map_context); -} - -/* * Take a path out of use. 
*/ static int fail_path(struct pgpath *pgpath) -- cgit v0.10.2 From 9bf59a611a5eb479f321fae34adc9f948de0a42f Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 28 Feb 2014 15:33:48 +0100 Subject: dm mpath: remove extra nesting in map function Return early for case when no path exists, and when the pathgroup isn't ready. This eliminates the need for extra nesting for the the common case. Signed-off-by: Mike Snitzer Signed-off-by: Hannes Reinecke diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 07ca77f..e7d80ba 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -398,29 +398,31 @@ static int multipath_map(struct dm_target *ti, struct request *clone, pgpath = m->current_pgpath; - if (pgpath) { - if (pg_ready(m)) { - if (set_mapinfo(m, map_context) < 0) - /* ENOMEM, requeue */ - goto out_unlock; - - bdev = pgpath->path.dev->bdev; - clone->q = bdev_get_queue(bdev); - clone->rq_disk = bdev->bd_disk; - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - mpio = map_context->ptr; - mpio->pgpath = pgpath; - mpio->nr_bytes = nr_bytes; - if (pgpath->pg->ps.type->start_io) - pgpath->pg->ps.type->start_io(&pgpath->pg->ps, - &pgpath->path, - nr_bytes); - r = DM_MAPIO_REMAPPED; - goto out_unlock; - } + if (!pgpath) { + if (!__must_push_back(m)) + r = -EIO; /* Failed */ + goto out_unlock; + } + if (!pg_ready(m)) { __pg_init_all_paths(m); - } else if (!__must_push_back(m)) - r = -EIO; /* Failed */ + goto out_unlock; + } + if (set_mapinfo(m, map_context) < 0) + /* ENOMEM, requeue */ + goto out_unlock; + + bdev = pgpath->path.dev->bdev; + clone->q = bdev_get_queue(bdev); + clone->rq_disk = bdev->bd_disk; + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; + mpio = map_context->ptr; + mpio->pgpath = pgpath; + mpio->nr_bytes = nr_bytes; + if (pgpath->pg->ps.type->start_io) + pgpath->pg->ps.type->start_io(&pgpath->pg->ps, + &pgpath->path, + nr_bytes); + r = DM_MAPIO_REMAPPED; out_unlock: spin_unlock_irqrestore(&m->lock, flags); -- cgit v0.10.2 From 3a0175096423856d145e210527271c9eec188a5f Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 28 Feb 2014 15:33:49 +0100 Subject: dm-mpath: do not activate failed paths activate_path() is run without a lock, so the path might be set to failed before activate_path() had a chance to run. This patch add a check for ->active in activate_path() to avoid unnecessary overhead by calling functions which are known to be failing. Signed-off-by: Hannes Reinecke Reviewed-by: Jun'ichi Nomura Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index e7d80ba..b1fb01e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1195,8 +1195,11 @@ static void activate_path(struct work_struct *work) struct pgpath *pgpath = container_of(work, struct pgpath, activate_path.work); - scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), - pg_init_done, pgpath); + if (pgpath->is_active) + scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), + pg_init_done, pgpath); + else + pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); } static int noretry_error(int error) -- cgit v0.10.2 From a356e42620271deb5afb6570606154d160783bba Mon Sep 17 00:00:00 2001 From: Jose Castillo Date: Wed, 29 Jan 2014 17:52:45 +0100 Subject: dm mpath: print more useful warnings in multipath_message() The warning message "Unrecognised multipath message received" is displayed in two different situations in multipath_message(): when the number of arguments passed is invalid and when the string passed in argv[0] is not recognized. 
Make it easier to identify where the problem is by making these warnings more specific with additional context for each case. Signed-off-by: Jose Castillo Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b1fb01e..aa009e8 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1478,7 +1478,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) } if (argc != 2) { - DMWARN("Unrecognised multipath message received."); + DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc); goto out; } @@ -1496,7 +1496,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) else if (!strcasecmp(argv[0], "fail_path")) action = fail_path; else { - DMWARN("Unrecognised multipath message received."); + DMWARN("Unrecognised multipath message received: %s", argv[0]); goto out; } -- cgit v0.10.2 From fe76cd88e654124d1431bb662a0fc6e99ca811a5 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 28 Mar 2014 02:15:02 -0400 Subject: dm thin: fix dangling bio in process_deferred_bios error path If unable to ensure_next_mapping() we must add the current bio, which was removed from the @bios list via bio_list_pop, back to the deferred_bios list before all the remaining @bios. Signed-off-by: Mike Snitzer Acked-by: Joe Thornber Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index be70d38..60cc506 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1392,9 +1392,9 @@ static void process_deferred_bios(struct pool *pool) */ if (ensure_next_mapping(pool)) { spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_bios, bio); bio_list_merge(&pool->deferred_bios, &bios); spin_unlock_irqrestore(&pool->lock, flags); - break; } -- cgit v0.10.2 From 760fe67e539b2f1a95dbb4c9700140eccdb1c0c1 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 20 Mar 2014 08:36:47 -0400 Subject: dm thin: simplify pool_is_congested The pool is congested if the pool is in PM_OUT_OF_DATA_SPACE mode. This is more explicit/clear/efficient than inferring whether or not the pool is congested by checking if retry_on_resume_list is empty. Signed-off-by: Mike Snitzer Acked-by: Joe Thornber diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 60cc506..af871fd 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1757,20 +1757,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) { - int r; - unsigned long flags; struct pool_c *pt = container_of(cb, struct pool_c, callbacks); + struct request_queue *q; - spin_lock_irqsave(&pt->pool->lock, flags); - r = !bio_list_empty(&pt->pool->retry_on_resume_list); - spin_unlock_irqrestore(&pt->pool->lock, flags); + if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) + return 1; - if (!r) { - struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); - r = bdi_congested(&q->backing_dev_info, bdi_bits); - } - - return r; + q = bdev_get_queue(pt->data_dev->bdev); + return bdi_congested(&q->backing_dev_info, bdi_bits); } static void __requeue_bios(struct pool *pool) -- cgit v0.10.2 From c140e1c4e23bdaf0a5c00b6a8b6d18f259d39a00 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 20 Mar 2014 21:17:14 -0400 Subject: dm thin: use per thin device deferred bio lists The thin-pool previously only had a single deferred_bios list that would collect bios for all thin devices in the pool. 
Split this per-pool deferred_bios list out to per-thin deferred_bios_list -- doing so enables increased parallelism when processing deferred bios. And now that each thin device has it's own deferred_bios_list we can sort all bios in the list using logical sector. The requeue code in error handling path is also cleaner as a side-effect. Signed-off-by: Mike Snitzer Acked-by: Joe Thornber diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index af871fd..08e62ae 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -178,12 +179,10 @@ struct pool { unsigned ref_count; spinlock_t lock; - struct bio_list deferred_bios; struct bio_list deferred_flush_bios; struct list_head prepared_mappings; struct list_head prepared_discards; - - struct bio_list retry_on_resume_list; + struct list_head active_thins; struct dm_deferred_set *shared_read_ds; struct dm_deferred_set *all_io_ds; @@ -220,6 +219,7 @@ struct pool_c { * Target context for a thin. */ struct thin_c { + struct list_head list; struct dm_dev *pool_dev; struct dm_dev *origin_dev; dm_thin_id dev_id; @@ -227,6 +227,9 @@ struct thin_c { struct pool *pool; struct dm_thin_device *td; bool requeue_mode:1; + spinlock_t lock; + struct bio_list deferred_bio_list; + struct bio_list retry_on_resume_list; }; /*----------------------------------------------------------------*/ @@ -287,9 +290,9 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc, struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -378,30 +381,22 @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) bio_list_init(&bios); - spin_lock_irqsave(&tc->pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); bio_list_merge(&bios, master); bio_list_init(master); - spin_unlock_irqrestore(&tc->pool->lock, flags); + spin_unlock_irqrestore(&tc->lock, flags); - while ((bio = bio_list_pop(&bios))) { - struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); - - if (h->tc == tc) - bio_endio(bio, DM_ENDIO_REQUEUE); - else - bio_list_add(master, bio); - } + while ((bio = bio_list_pop(&bios))) + bio_endio(bio, DM_ENDIO_REQUEUE); } static void requeue_io(struct thin_c *tc) { - struct pool *pool = tc->pool; - - requeue_bio_list(tc, &pool->deferred_bios); - requeue_bio_list(tc, &pool->retry_on_resume_list); + requeue_bio_list(tc, &tc->deferred_bio_list); + requeue_bio_list(tc, &tc->retry_on_resume_list); } -static void error_retry_list(struct pool *pool) +static void error_thin_retry_list(struct thin_c *tc) { struct bio *bio; unsigned long flags; @@ -409,15 +404,25 @@ static void error_retry_list(struct pool *pool) bio_list_init(&bios); - spin_lock_irqsave(&pool->lock, flags); - bio_list_merge(&bios, &pool->retry_on_resume_list); - bio_list_init(&pool->retry_on_resume_list); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_merge(&bios, &tc->retry_on_resume_list); + bio_list_init(&tc->retry_on_resume_list); + spin_unlock_irqrestore(&tc->lock, flags); while ((bio = bio_list_pop(&bios))) bio_io_error(bio); } +static void error_retry_list(struct pool *pool) +{ + struct thin_c *tc; 
+ + rcu_read_lock(); + list_for_each_entry_rcu(tc, &pool->active_thins, list) + error_thin_retry_list(tc); + rcu_read_unlock(); +} + /* * This section of code contains the logic for processing a thin device's IO. * Much of the code depends on pool object resources (lists, workqueues, etc) @@ -608,9 +613,9 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell) struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - cell_release(pool, cell, &pool->deferred_bios); - spin_unlock_irqrestore(&tc->pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + cell_release(pool, cell, &tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -623,9 +628,9 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - cell_release_no_holder(pool, cell, &pool->deferred_bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + cell_release_no_holder(pool, cell, &tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -1001,12 +1006,11 @@ static void retry_on_resume(struct bio *bio) { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct thin_c *tc = h->tc; - struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - bio_list_add(&pool->retry_on_resume_list, bio); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_add(&tc->retry_on_resume_list, bio); + spin_unlock_irqrestore(&tc->lock, flags); } static bool should_error_unserviceable_bio(struct pool *pool) @@ -1363,38 +1367,36 @@ static int need_commit_due_to_time(struct pool *pool) jiffies > pool->last_commit_jiffies + COMMIT_PERIOD; } -static void process_deferred_bios(struct pool *pool) +static void process_thin_deferred_bios(struct thin_c *tc) { + struct pool *pool = tc->pool; unsigned long flags; struct bio *bio; struct bio_list bios; + if (tc->requeue_mode) { + requeue_bio_list(tc, &tc->deferred_bio_list); + return; + } + bio_list_init(&bios); - spin_lock_irqsave(&pool->lock, flags); - bio_list_merge(&bios, &pool->deferred_bios); - bio_list_init(&pool->deferred_bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_merge(&bios, &tc->deferred_bio_list); + bio_list_init(&tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); while ((bio = bio_list_pop(&bios))) { - struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); - struct thin_c *tc = h->tc; - - if (tc->requeue_mode) { - bio_endio(bio, DM_ENDIO_REQUEUE); - continue; - } - /* * If we've got no free new_mapping structs, and processing * this bio might require one, we pause until there are some * prepared mappings to process. 
*/ if (ensure_next_mapping(pool)) { - spin_lock_irqsave(&pool->lock, flags); - bio_list_add(&pool->deferred_bios, bio); - bio_list_merge(&pool->deferred_bios, &bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_add(&tc->deferred_bio_list, bio); + bio_list_merge(&tc->deferred_bio_list, &bios); + spin_unlock_irqrestore(&tc->lock, flags); break; } @@ -1403,6 +1405,19 @@ static void process_deferred_bios(struct pool *pool) else pool->process_bio(tc, bio); } +} + +static void process_deferred_bios(struct pool *pool) +{ + unsigned long flags; + struct bio *bio; + struct bio_list bios; + struct thin_c *tc; + + rcu_read_lock(); + list_for_each_entry_rcu(tc, &pool->active_thins, list) + process_thin_deferred_bios(tc); + rcu_read_unlock(); /* * If there are any deferred flush bios, we must commit @@ -1634,9 +1649,9 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio) unsigned long flags; struct pool *pool = tc->pool; - spin_lock_irqsave(&pool->lock, flags); - bio_list_add(&pool->deferred_bios, bio); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_add(&tc->deferred_bio_list, bio); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -1767,10 +1782,19 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) return bdi_congested(&q->backing_dev_info, bdi_bits); } -static void __requeue_bios(struct pool *pool) +static void requeue_bios(struct pool *pool) { - bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list); - bio_list_init(&pool->retry_on_resume_list); + unsigned long flags; + struct thin_c *tc; + + rcu_read_lock(); + list_for_each_entry_rcu(tc, &pool->active_thins, list) { + spin_lock_irqsave(&tc->lock, flags); + bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); + bio_list_init(&tc->retry_on_resume_list); + spin_unlock_irqrestore(&tc->lock, flags); + } + rcu_read_unlock(); } /*---------------------------------------------------------------- @@ -1951,12 +1975,11 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_WORK(&pool->worker, do_worker); INIT_DELAYED_WORK(&pool->waker, do_waker); spin_lock_init(&pool->lock); - bio_list_init(&pool->deferred_bios); bio_list_init(&pool->deferred_flush_bios); INIT_LIST_HEAD(&pool->prepared_mappings); INIT_LIST_HEAD(&pool->prepared_discards); + INIT_LIST_HEAD(&pool->active_thins); pool->low_water_triggered = false; - bio_list_init(&pool->retry_on_resume_list); pool->shared_read_ds = dm_deferred_set_create(); if (!pool->shared_read_ds) { @@ -2501,8 +2524,8 @@ static void pool_resume(struct dm_target *ti) spin_lock_irqsave(&pool->lock, flags); pool->low_water_triggered = false; - __requeue_bios(pool); spin_unlock_irqrestore(&pool->lock, flags); + requeue_bios(pool); do_waker(&pool->waker.work); } @@ -2962,6 +2985,12 @@ static struct target_type pool_target = { static void thin_dtr(struct dm_target *ti) { struct thin_c *tc = ti->private; + unsigned long flags; + + spin_lock_irqsave(&tc->pool->lock, flags); + list_del_rcu(&tc->list); + spin_unlock_irqrestore(&tc->pool->lock, flags); + synchronize_rcu(); mutex_lock(&dm_thin_pool_table.mutex); @@ -3008,6 +3037,9 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -ENOMEM; goto out_unlock; } + spin_lock_init(&tc->lock); + bio_list_init(&tc->deferred_bio_list); + bio_list_init(&tc->retry_on_resume_list); if (argc == 3) { r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); @@ -3079,6 +3111,17 @@ 
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) mutex_unlock(&dm_thin_pool_table.mutex); + spin_lock(&tc->pool->lock); + list_add_tail_rcu(&tc->list, &tc->pool->active_thins); + spin_unlock(&tc->pool->lock); + /* + * This synchronize_rcu() call is needed here otherwise we risk a + * wake_worker() call finding no bios to process (because the newly + * added tc isn't yet visible). So this reduces latency since we + * aren't then dependent on the periodic commit to wake_worker(). + */ + synchronize_rcu(); + return 0; bad_target_max_io_len: -- cgit v0.10.2 From 67324ea18812bc952ef96892fbd5817b9050413f Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 21 Mar 2014 18:33:41 -0400 Subject: dm thin: sort the per thin deferred bios using an rb_tree A thin-pool will allocate blocks using FIFO order for all thin devices which share the thin-pool. Because of this simplistic allocation the thin-pool's space can become fragmented quite easily; especially when multiple threads are requesting blocks in parallel. Sort each thin device's deferred_bio_list based on logical sector to help reduce fragmentation of the thin-pool's ondisk layout. The following tables illustrate the realized gains/potential offered by sorting each thin device's deferred_bio_list. An "io size"-sized random read of the device would result in "seeks/io" fragments being read, with an average "distance/seek" between each fragment. Data was written to a single thin device using multiple threads via iozone (8 threads, 64K for both the block_size and io_size). unsorted: io size seeks/io distance/seek -------------------------------------- 4k 0.000 0b 16k 0.013 11m 64k 0.065 11m 256k 0.274 10m 1m 1.109 10m 4m 4.411 10m 16m 17.097 11m 64m 60.055 13m 256m 148.798 25m 1g 809.929 21m sorted: io size seeks/io distance/seek -------------------------------------- 4k 0.000 0b 16k 0.000 1g 64k 0.001 1g 256k 0.003 1g 1m 0.011 1g 4m 0.045 1g 16m 0.181 1g 64m 0.747 1011m 256m 3.299 1g 1g 14.373 1g Signed-off-by: Mike Snitzer Acked-by: Joe Thornber diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 08e62ae..53728be 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -16,6 +16,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "thin" @@ -230,6 +231,7 @@ struct thin_c { spinlock_t lock; struct bio_list deferred_bio_list; struct bio_list retry_on_resume_list; + struct rb_root sort_bio_list; /* sorted list of deferred bios */ }; /*----------------------------------------------------------------*/ @@ -371,6 +373,7 @@ struct dm_thin_endio_hook { struct dm_deferred_entry *shared_read_entry; struct dm_deferred_entry *all_io_entry; struct dm_thin_new_mapping *overwrite_mapping; + struct rb_node rb_node; }; static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) @@ -1367,12 +1370,77 @@ static int need_commit_due_to_time(struct pool *pool) jiffies > pool->last_commit_jiffies + COMMIT_PERIOD; } +#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node) +#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook)) + +static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) +{ + struct rb_node **rbp, *parent; + struct dm_thin_endio_hook *pbd; + sector_t bi_sector = bio->bi_iter.bi_sector; + + rbp = &tc->sort_bio_list.rb_node; + parent = NULL; + while (*rbp) { + parent = *rbp; + pbd = thin_pbd(parent); + + if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector) + rbp = &(*rbp)->rb_left; + else + rbp = &(*rbp)->rb_right; + } + + pbd = 
dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); + rb_link_node(&pbd->rb_node, parent, rbp); + rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); +} + +static void __extract_sorted_bios(struct thin_c *tc) +{ + struct rb_node *node; + struct dm_thin_endio_hook *pbd; + struct bio *bio; + + for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { + pbd = thin_pbd(node); + bio = thin_bio(pbd); + + bio_list_add(&tc->deferred_bio_list, bio); + rb_erase(&pbd->rb_node, &tc->sort_bio_list); + } + + WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); +} + +static void __sort_thin_deferred_bios(struct thin_c *tc) +{ + struct bio *bio; + struct bio_list bios; + + bio_list_init(&bios); + bio_list_merge(&bios, &tc->deferred_bio_list); + bio_list_init(&tc->deferred_bio_list); + + /* Sort deferred_bio_list using rb-tree */ + while ((bio = bio_list_pop(&bios))) + __thin_bio_rb_add(tc, bio); + + /* + * Transfer the sorted bios in sort_bio_list back to + * deferred_bio_list to allow lockless submission of + * all bios. + */ + __extract_sorted_bios(tc); +} + static void process_thin_deferred_bios(struct thin_c *tc) { struct pool *pool = tc->pool; unsigned long flags; struct bio *bio; struct bio_list bios; + struct blk_plug plug; if (tc->requeue_mode) { requeue_bio_list(tc, &tc->deferred_bio_list); @@ -1382,10 +1450,20 @@ static void process_thin_deferred_bios(struct thin_c *tc) bio_list_init(&bios); spin_lock_irqsave(&tc->lock, flags); + + if (bio_list_empty(&tc->deferred_bio_list)) { + spin_unlock_irqrestore(&tc->lock, flags); + return; + } + + __sort_thin_deferred_bios(tc); + bio_list_merge(&bios, &tc->deferred_bio_list); bio_list_init(&tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); + blk_start_plug(&plug); while ((bio = bio_list_pop(&bios))) { /* * If we've got no free new_mapping structs, and processing @@ -1405,6 +1483,7 @@ static void process_thin_deferred_bios(struct thin_c *tc) else pool->process_bio(tc, bio); } + blk_finish_plug(&plug); } static void process_deferred_bios(struct pool *pool) @@ -2964,7 +3043,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 11, 0}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -3040,6 +3119,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) spin_lock_init(&tc->lock); bio_list_init(&tc->deferred_bio_list); bio_list_init(&tc->retry_on_resume_list); + tc->sort_bio_list = RB_ROOT; if (argc == 3) { r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); @@ -3287,7 +3367,7 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 11, 0}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, -- cgit v0.10.2 From 0596661f0a16d9d69bf1033320e70b6ff52b5e81 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Thu, 3 Apr 2014 16:16:44 +0100 Subject: dm cache: fix a lock-inversion When suspending a cache the policy is walked and the individual policy hints written to the metadata via sync_metadata(). 
This led to this lock order: policy->lock cache_metadata->root_lock When loading the cache target the policy is populated while the metadata lock is held: cache_metadata->root_lock policy->lock Fix this potential lock-inversion (ABBA) deadlock in sync_metadata() by ensuring the cache_metadata root_lock is held whilst all the hints are written, rather than being repeatedly locked while policy->lock is held (as was the case with each callout that policy_walk_mappings() made to the old save_hint() method). Found by turning on the CONFIG_PROVE_LOCKING ("Lock debugging: prove locking correctness") build option. However, it is not clear how the LOCKDEP reported paths can lead to a deadlock since the two paths, suspending a target and loading a target, never occur at the same time. But that doesn't mean the same lock-inversion couldn't have occurred elsewhere. Reported-by: Marian Csontos Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 66f102a..4ead4ba 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1245,22 +1245,12 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po return 0; } -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint) { + struct dm_cache_metadata *cmd = context; + __le32 value = cpu_to_le32(hint); int r; - down_write(&cmd->root_lock); - r = begin_hints(cmd, policy); - up_write(&cmd->root_lock); - - return r; -} - -static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, - uint32_t hint) -{ - int r; - __le32 value = cpu_to_le32(hint); __dm_bless_for_disk(&value); r = dm_array_set_value(&cmd->hint_info, cmd->hint_root, @@ -1270,16 +1260,25 @@ static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, return r; } -int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, - uint32_t hint) +static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) { int r; - if (!hints_array_initialized(cmd)) - return 0; + r = begin_hints(cmd, policy); + if (r) { + DMERR("begin_hints failed"); + return r; + } + + return policy_walk_mappings(policy, save_hint, cmd); +} + +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +{ + int r; down_write(&cmd->root_lock); - r = save_hint(cmd, cblock, hint); + r = write_hints(cmd, policy); up_write(&cmd->root_lock); return r; diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index ce8468b..cd70a78 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -128,14 +128,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd); * rather than querying the policy for each cblock, we let it walk its data * structures and fill in the hints in whatever order it wishes. */ - -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); - -/* - * requests hints for every cblock and stores in the metadata device. - */ -int dm_cache_save_hint(struct dm_cache_metadata *cmd, - dm_cblock_t cblock, uint32_t hint); +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); /* * Query method. Are all the blocks in the cache clean? 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 8534679..1bf4a71 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2582,30 +2582,6 @@ static int write_discard_bitset(struct cache *cache) return 0; } -static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, - uint32_t hint) -{ - struct cache *cache = context; - return dm_cache_save_hint(cache->cmd, cblock, hint); -} - -static int write_hints(struct cache *cache) -{ - int r; - - r = dm_cache_begin_hints(cache->cmd, cache->policy); - if (r) { - DMERR("dm_cache_begin_hints failed"); - return r; - } - - r = policy_walk_mappings(cache->policy, save_hint, cache); - if (r) - DMERR("policy_walk_mappings failed"); - - return r; -} - /* * returns true on success */ @@ -2623,7 +2599,7 @@ static bool sync_metadata(struct cache *cache) save_stats(cache); - r3 = write_hints(cache); + r3 = dm_cache_write_hints(cache->cmd, cache->policy); if (r3) DMERR("could not write hints"); @@ -3094,7 +3070,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, -- cgit v0.10.2
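To make the ABBA lock inversion described in the last commit above easier to see, here is a minimal userspace C sketch; it is not the dm-cache code. The names (policy_lock, metadata_root_lock, save_hints_old, load_mappings, write_hints_fixed) are hypothetical, and pthread mutexes stand in for the kernel's policy spinlock and cache-metadata rwsem. Build with 'cc -pthread abba.c'. The old suspend path acquires the locks as A then B for every hint, while the load path acquires them as B then A; the fix takes the metadata lock once around the whole hint walk so both paths agree on B then A (and, as a side effect, stops re-taking the metadata lock for each individual hint).

#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative only: policy_lock ("A") and metadata_root_lock ("B") stand in
 * for the policy lock and the cache-metadata root_lock named in the commit
 * message above; pthread mutexes are used so the sketch builds in userspace.
 */
static pthread_mutex_t policy_lock        = PTHREAD_MUTEX_INITIALIZER; /* "A" */
static pthread_mutex_t metadata_root_lock = PTHREAD_MUTEX_INITIALIZER; /* "B" */

#define NR_HINTS 4
static unsigned int hints[NR_HINTS];

/* Old suspend path: policy lock held, metadata lock taken per hint => A then B. */
static void save_hints_old(void)
{
	pthread_mutex_lock(&policy_lock);
	for (int i = 0; i < NR_HINTS; i++) {
		pthread_mutex_lock(&metadata_root_lock);   /* B acquired under A */
		hints[i] = (unsigned int)i;                /* "write one hint"   */
		pthread_mutex_unlock(&metadata_root_lock);
	}
	pthread_mutex_unlock(&policy_lock);
}

/* Load path: metadata lock held while the policy is populated => B then A. */
static void load_mappings(void)
{
	pthread_mutex_lock(&metadata_root_lock);
	pthread_mutex_lock(&policy_lock);                  /* A acquired under B */
	for (int i = 0; i < NR_HINTS; i++)
		(void)hints[i];                            /* "populate policy"  */
	pthread_mutex_unlock(&policy_lock);
	pthread_mutex_unlock(&metadata_root_lock);
}

/*
 * Fixed suspend path: take the metadata lock once around the whole walk, so
 * both paths now acquire B before A and the ABBA cycle disappears.
 */
static void write_hints_fixed(void)
{
	pthread_mutex_lock(&metadata_root_lock);
	pthread_mutex_lock(&policy_lock);
	for (int i = 0; i < NR_HINTS; i++)
		hints[i] = (unsigned int)i;
	pthread_mutex_unlock(&policy_lock);
	pthread_mutex_unlock(&metadata_root_lock);
}

int main(void)
{
	/*
	 * Run sequentially, none of this deadlocks; the hazard is two threads
	 * running save_hints_old() and load_mappings() concurrently, one
	 * holding A waiting for B while the other holds B waiting for A.
	 * That A->B vs B->A conflict is what a lock-dependency checker such
	 * as CONFIG_PROVE_LOCKING reports, and write_hints_fixed() removes it.
	 */
	save_hints_old();
	load_mappings();
	write_hints_fixed();
	printf("hint[0] = %u\n", hints[0]);
	return 0;
}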