author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 22:22:26 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 22:22:26 (GMT)
commit    9ea18f8cab5f1c36cdd0f09717e35ceb48c36a87 (patch)
tree      0c8da7ac47cb59fe39f177ab0407f554aff77194 /drivers/md
parent    caf292ae5bb9d57198ce001d8b762f7abae3a94d (diff)
parent    849c6e7746e4f6317ace6aa7d2fcdcd844e99ddb (diff)
Merge branch 'for-3.19/drivers' of git://git.kernel.dk/linux-block
Pull block layer driver updates from Jens Axboe:

 - NVMe updates:
     - The blk-mq conversion from Matias (and others)
     - A stack of NVMe bug fixes from the nvme tree, mostly from Keith.
     - Various bug fixes from me, fixing issues in both the blk-mq
       conversion and generic bugs.
     - Abort and CPU online fix from Sam.
     - Hot add/remove fix from Indraneel.

 - A couple of drbd fixes from the drbd team (Andreas, Lars, Philipp)

 - With the generic IO stat accounting from 3.19/core, converting md,
   bcache, and rsxx to use those.  From Gu Zheng.

 - Boundary check for queue/irq mode for null_blk from Matias.  Fixes
   cases where invalid values could be given, causing the device to hang.

 - The xen blkfront pull request, with two bug fixes from Vitaly.

* 'for-3.19/drivers' of git://git.kernel.dk/linux-block: (56 commits)
  NVMe: fix race condition in nvme_submit_sync_cmd()
  NVMe: fix retry/error logic in nvme_queue_rq()
  NVMe: Fix FS mount issue (hot-remove followed by hot-add)
  NVMe: fix error return checking from blk_mq_alloc_request()
  NVMe: fix freeing of wrong request in abort path
  xen/blkfront: remove redundant flush_op
  xen/blkfront: improve protection against issuing unsupported REQ_FUA
  NVMe: Fix command setup on IO retry
  null_blk: boundary check queue_mode and irqmode
  block/rsxx: use generic io stats accounting functions to simplify io stat accounting
  md: use generic io stats accounting functions to simplify io stat accounting
  drbd: use generic io stats accounting functions to simplify io stat accounting
  md/bcache: use generic io stats accounting functions to simplify io stat accounting
  NVMe: Update module version major number
  NVMe: fail pci initialization if the device doesn't have any BARs
  NVMe: add ->exit_hctx() hook
  NVMe: make setup work for devices that don't do INTx
  NVMe: enable IO stats by default
  NVMe: nvme_submit_async_admin_req() must use atomic rq allocation
  NVMe: replace blk_put_request() with blk_mq_free_request()
  ...
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/request.c  23
-rw-r--r--  drivers/md/dm.c              13
-rw-r--r--  drivers/md/md.c               6
3 files changed, 10 insertions, 32 deletions
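
The three drivers/md conversions below all make the same substitution: the open-coded part_stat_lock() / part_stat_inc() / part_stat_add() / part_stat_unlock() sequences are dropped in favour of the generic_start_io_acct() and generic_end_io_acct() helpers merged through 3.19/core. A minimal sketch of how a bio-based driver pairs the two calls follows; it assumes the 3.19-era helper signatures visible in this diff, and sketch_dev, sketch_submit_bio() and sketch_complete_bio() are illustrative placeholders rather than code from this tree.

/*
 * Minimal sketch only -- not code from this tree.  It assumes the
 * 3.19-era helper signatures that the hunks below rely on:
 *
 *   void generic_start_io_acct(int rw, unsigned long sectors,
 *                              struct hd_struct *part);
 *   void generic_end_io_acct(int rw, struct hd_struct *part,
 *                            unsigned long start_time);
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

struct sketch_dev {                     /* placeholder driver state */
        struct gendisk *disk;
};

static void sketch_submit_bio(struct sketch_dev *dev, struct bio *bio)
{
        int rw = bio_data_dir(bio);

        /* Replaces the removed part_stat_lock()/part_stat_inc()/
         * part_stat_add()/part_stat_unlock() sequence at submission. */
        generic_start_io_acct(rw, bio_sectors(bio), &dev->disk->part0);

        /* The driver still has to remember the submission time (jiffies)
         * in its per-request context for the completion side. */
}

static void sketch_complete_bio(struct sketch_dev *dev, struct bio *bio,
                                unsigned long start_time, int error)
{
        /* The helper derives the duration from start_time itself, so the
         * open-coded "jiffies - start_time" ticks accounting goes away. */
        generic_end_io_acct(bio_data_dir(bio), &dev->disk->part0, start_time);

        bio_endio(bio, error);          /* 3.19-era two-argument bio_endio() */
}

In the hunks that follow, the submission time handed back to generic_end_io_acct() is bcache's s->start_time and dm's io->start_time; the md hunk performs only the submission-side accounting.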
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 62e6e98..ab43fad 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -601,13 +601,8 @@ static void request_endio(struct bio *bio, int error)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- int cpu, rw = bio_data_dir(s->orig_bio);
- unsigned long duration = jiffies - s->start_time;
-
- cpu = part_stat_lock();
- part_round_stats(cpu, &s->d->disk->part0);
- part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
- part_stat_unlock();
+ generic_end_io_acct(bio_data_dir(s->orig_bio),
+ &s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
bio_endio(s->orig_bio, s->iop.error);
@@ -959,12 +954,9 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
struct search *s;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
- int cpu, rw = bio_data_dir(bio);
+ int rw = bio_data_dir(bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &d->disk->part0, ios[rw]);
- part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
- part_stat_unlock();
+ generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
bio->bi_bdev = dc->bdev;
bio->bi_iter.bi_sector += dc->sb.data_offset;
@@ -1074,12 +1066,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
struct search *s;
struct closure *cl;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
- int cpu, rw = bio_data_dir(bio);
+ int rw = bio_data_dir(bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &d->disk->part0, ios[rw]);
- part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
- part_stat_unlock();
+ generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
s = search_alloc(bio, d);
cl = &s->cl;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8f37ed2..4c06585 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -605,13 +605,10 @@ static void end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
- int pending, cpu;
+ int pending;
int rw = bio_data_dir(bio);
- cpu = part_stat_lock();
- part_round_stats(cpu, &dm_disk(md)->part0);
- part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
- part_stat_unlock();
+ generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
@@ -1651,16 +1648,12 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
- int cpu;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
- part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
- part_stat_unlock();
+ generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9233c71..056ccd2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -247,7 +247,6 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
- int cpu;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
@@ -284,10 +283,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
- part_stat_unlock();
+ generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);