From 94007751bb02797ba87bac7aacee2731ac2039a3 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Sat, 10 Sep 2011 17:20:21 +1000
Subject: Avoid dereferencing a 'request_queue' after last close.

On the last close of an 'md' device which has been stopped, the device
is destroyed and in particular the request_queue is freed.  The free is
done in a separate thread, so it might happen a short time later.

__blkdev_put calls bdev_inode_switch_bdi *after* ->release has been
called.

Since commit f758eeabeb96f878c860e8f110f94ec8820822a9,
bdev_inode_switch_bdi will dereference the 'old' bdi, which lives
inside a request_queue, to get a spin lock.  This causes the last close
on an md device to sometimes take a spin_lock which lives in freed
memory - which results in an oops.

So move the call to bdev_inode_switch_bdi before the call to ->release.

Cc: Christoph Hellwig
Cc: Hugh Dickins
Cc: Andrew Morton
Cc: Wu Fengguang
Acked-by: Wu Fengguang
Cc: stable@kernel.org
Signed-off-by: NeilBrown

diff --git a/fs/block_dev.c b/fs/block_dev.c
index ff77262..95f786e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 		WARN_ON_ONCE(bdev->bd_holders);
 		sync_blockdev(bdev);
 		kill_bdev(bdev);
+		/* ->release can cause the old bdi to disappear,
+		 * so must switch it out first
+		 */
+		bdev_inode_switch_bdi(bdev->bd_inode,
+					&default_backing_dev_info);
 	}
 	if (bdev->bd_contains == bdev) {
 		if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 		disk_put_part(bdev->bd_part);
 		bdev->bd_part = NULL;
 		bdev->bd_disk = NULL;
-		bdev_inode_switch_bdi(bdev->bd_inode,
-					&default_backing_dev_info);
 		if (bdev != bdev->bd_contains)
 			victim = bdev->bd_contains;
 		bdev->bd_contains = NULL;
--
cgit v0.10.2

From 19d5f834d6aff7efb1c9353523865c5bce869470 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Sat, 10 Sep 2011 17:21:17 +1000
Subject: md/raid10: unify handling of write completion.

A write can complete at two different places:
1/ when the last member-device write completes, through
   raid10_end_write_request
2/ in make_request(), when we remove the initial bias from ->remaining.

These two should do exactly the same thing and the comment says they
do, but they don't.

So factor the correct code out into a function and call it in both
places.  This makes the code much more similar to RAID1.

The difference is only significant if there is an error, and errors
usually take a while to appear, so it is unlikely that an error will
already have occurred when make_request is completing; hence this is
unlikely to cause real problems.

Signed-off-by: NeilBrown

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4..f6873fc 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
 	md_write_end(r10_bio->mddev);
 }
 
+static void one_write_done(r10bio_t *r10_bio)
+{
+	if (atomic_dec_and_test(&r10_bio->remaining)) {
+		if (test_bit(R10BIO_WriteError, &r10_bio->state))
+			reschedule_retry(r10_bio);
+		else {
+			close_write(r10_bio);
+			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+				reschedule_retry(r10_bio);
+			else
+				raid_end_bio_io(r10_bio);
+		}
+	}
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
 	 * Let's see if all mirrored write operations have finished
 	 * already.
	 */
-	if (atomic_dec_and_test(&r10_bio->remaining)) {
-		if (test_bit(R10BIO_WriteError, &r10_bio->state))
-			reschedule_retry(r10_bio);
-		else {
-			close_write(r10_bio);
-			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
-				reschedule_retry(r10_bio);
-			else
-				raid_end_bio_io(r10_bio);
-		}
-	}
+	one_write_done(r10_bio);
 	if (dec_rdev)
 		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
@@ -1127,15 +1132,8 @@ retry_write:
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
-	if (atomic_dec_and_test(&r10_bio->remaining)) {
-		/* This matches the end of raid10_end_write_request() */
-		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-				r10_bio->sectors,
-				!test_bit(R10BIO_Degraded, &r10_bio->state),
-				0);
-		md_write_end(mddev);
-		raid_end_bio_io(r10_bio);
-	}
+	/* Remove the bias on 'remaining' */
+	one_write_done(r10_bio);
 
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
--
cgit v0.10.2

From 079fa166a2874985ae58b2e21e26e1cbc91127d4 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Sat, 10 Sep 2011 17:21:23 +1000
Subject: md/raid1,10: Remove use-after-free bug in make_request.

A single request to RAID1 or RAID10 might result in multiple requests
if there are known bad blocks that need to be avoided.

To detect if we need to submit another write request we test:

	if (sectors_handled < (bio->bi_size >> 9)) {

However this is after we call **_write_done() so the 'bio' no longer
belongs to us - the writes could have completed and the bio freed.

So move the **_write_done call until after the test against
bio->bi_size.

This addresses https://bugzilla.kernel.org/show_bug.cgi?id=41862

Reported-by: Bruno Wolff III
Tested-by: Bruno Wolff III
Signed-off-by: NeilBrown

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0..f4622dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
-	r1_bio_write_done(r1_bio);
-
-	/* In case raid1d snuck in to freeze_array */
-	wake_up(&conf->wait_barrier);
-
+	/* Mustn't call r1_bio_write_done before this next test,
+	 * as it could result in the bio being freed.
+	 */
 	if (sectors_handled < (bio->bi_size >> 9)) {
+		r1_bio_write_done(r1_bio);
 		/* We need another r1_bio.  It has already been counted
 		 * in bio->bi_phys_segments
 		 */
@@ -1117,6 +1116,11 @@ read_again:
 		goto retry_write;
 	}
 
+	r1_bio_write_done(r1_bio);
+
+	/* In case raid1d snuck in to freeze_array */
+	wake_up(&conf->wait_barrier);
+
 	if (do_sync || !bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f6873fc..d7a8468 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1132,13 +1132,12 @@ retry_write:
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
-	/* Remove the bias on 'remaining' */
-	one_write_done(r10_bio);
-
-	/* In case raid10d snuck in to freeze_array */
-	wake_up(&conf->wait_barrier);
+	/* Don't remove the bias on 'remaining' (one_write_done) until
+	 * after checking if we need to go around again.
+	 */
 
 	if (sectors_handled < (bio->bi_size >> 9)) {
+		one_write_done(r10_bio);
 		/* We need another r10_bio.  It has already been counted
 		 * in bio->bi_phys_segments.
		 */
@@ -1152,6 +1151,10 @@ retry_write:
 		r10_bio->state = 0;
 		goto retry_write;
 	}
+	one_write_done(r10_bio);
+
+	/* In case raid10d snuck in to freeze_array */
+	wake_up(&conf->wait_barrier);
 
 	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
--
cgit v0.10.2

From 27a7b260f71439c40546b43588448faac01adb93 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Sat, 10 Sep 2011 17:21:28 +1000
Subject: md: Fix handling for devices from 2TB to 4TB in 0.90 metadata.

0.90 metadata uses an unsigned 32bit number to count the number of
kilobytes used from each device.  This should allow up to 4TB per
device.  However we multiply this by 2 (to get sectors) before casting
to a larger type, so sizes above 2TB get truncated.

Also we allow rdev->sectors to be larger than 4TB, so it is possible
for the array to be resized larger than the metadata can handle.  So
make sure rdev->sectors never exceeds 4TB when 0.90 metadata is in use.

Also the sanity check at the end of super_90_load should include
level 1, as it uses ->size too.  (RAID0 and Linear don't use ->size at
all.)

Reported-by: Pim Zandbergen
Cc: stable@kernel.org
Signed-off-by: NeilBrown

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3742ce8..5404b22 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
 		ret = 0;
 	}
 	rdev->sectors = rdev->sb_start;
+	/* Limit to 4TB as metadata cannot record more than that */
+	if (rdev->sectors >= (2ULL << 32))
+		rdev->sectors = (2ULL << 32) - 2;
 
-	if (rdev->sectors < sb->size * 2 && sb->level > 1)
+	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
 		/* "this cannot possibly happen" ... */
 		ret = -EINVAL;
 
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->clevel[0] = 0;
 		mddev->layout = sb->layout;
 		mddev->raid_disks = sb->raid_disks;
-		mddev->dev_sectors = sb->size * 2;
+		mddev->dev_sectors = ((sector_t)sb->size) * 2;
 		mddev->events = ev1;
 		mddev->bitmap_info.offset = 0;
 		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 		rdev->sb_start = calc_dev_sboffset(rdev);
 	if (!num_sectors || num_sectors > rdev->sb_start)
 		num_sectors = rdev->sb_start;
+	/* Limit to 4TB as metadata cannot record more than that.
+	 * 4TB == 2^32 KB, or 2*2^32 sectors.
+	 */
+	if (num_sectors >= (2ULL << 32))
+		num_sectors = (2ULL << 32) - 2;
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
--
cgit v0.10.2
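
The arithmetic behind that last patch is easy to reproduce outside the kernel.
The following is a minimal, standalone C sketch - not kernel code: uint32_t,
uint64_t and the 'size_kb' variable are hypothetical stand-ins for the 0.90
superblock's 32-bit size field and sector_t, and the 3TB value is just an
example - showing why multiplying the 32-bit kilobyte count by 2 before
widening truncates anything above 2TB, while casting first (as super_90_load
and super_90_validate now do) does not.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical stand-in for the 32-bit kilobyte count in a 0.90
	 * superblock: 3TB expressed in KB (3 * 2^30 KB).
	 */
	uint32_t size_kb = 3221225472U;

	/* Buggy order: the multiply happens in 32-bit arithmetic and wraps,
	 * and only the wrapped result is widened.
	 */
	uint64_t truncated = (uint64_t)(size_kb * 2);

	/* Fixed order, matching ((sector_t)sb->size) * 2 in the patch:
	 * widen first, then multiply in 64 bits.
	 */
	uint64_t sectors = (uint64_t)size_kb * 2;

	printf("wrapped: %" PRIu64 " sectors (1TB)\n", truncated); /* 2147483648 */
	printf("correct: %" PRIu64 " sectors (3TB)\n", sectors);   /* 6442450944 */
	return 0;
}

The related cap in the patch, (2ULL << 32) sectors, is exactly 4TB: 2^32 KB is
the largest size the 32-bit field can record, and each KB is two 512-byte
sectors.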