From df111be6310fc41d059a485368e3c51a684859c2 Mon Sep 17 00:00:00 2001
From: Alex Elder
Date: Thu, 9 Aug 2012 10:33:26 -0700
Subject: rbd: check for overflow in rbd_get_num_segments()

It is possible in rbd_get_num_segments() for an overflow to occur
when adding the offset and length.  This is easily avoided.

Since the function returns an int and the one caller is already
prepared to handle errors, have it return -ERANGE if overflow would
occur.

The overflow check would not work if a zero-length request was
being tested, so short-circuit that case, returning 0 for the
number of segments required.  (This condition might be avoided
elsewhere already, I don't know.)  Have the caller end the request
if either an error or 0 is returned.  The returned value is passed
to __blk_end_request_all(), meaning a 0 length request is not
treated as an error.

Signed-off-by: Alex Elder
Reviewed-by: Yehuda Sadeh

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3475bb2..6da6990 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -50,6 +50,10 @@
 #define SECTOR_SHIFT	9
 #define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
 
+/* It might be useful to have this defined elsewhere too */
+
+#define U64_MAX	((u64) (~0ULL))
+
 #define RBD_DRV_NAME "rbd"
 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
 
@@ -691,8 +695,17 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
 static int rbd_get_num_segments(struct rbd_image_header *header,
 				u64 ofs, u64 len)
 {
-	u64 start_seg = ofs >> header->obj_order;
-	u64 end_seg = (ofs + len - 1) >> header->obj_order;
+	u64 start_seg;
+	u64 end_seg;
+
+	if (!len)
+		return 0;
+	if (len - 1 > U64_MAX - ofs)
+		return -ERANGE;
+
+	start_seg = ofs >> header->obj_order;
+	end_seg = (ofs + len - 1) >> header->obj_order;
+
 	return end_seg - start_seg + 1;
 }
 
@@ -1515,6 +1528,12 @@ static void rbd_rq_fn(struct request_queue *q)
 			size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
 
 		num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
+		if (num_segs <= 0) {
+			spin_lock_irq(q->queue_lock);
+			__blk_end_request_all(rq, num_segs);
+			ceph_put_snap_context(snapc);
+			continue;
+		}
 		coll = rbd_alloc_coll(num_segs);
 		if (!coll) {
 			spin_lock_irq(q->queue_lock);
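Note on the guard itself: the patch avoids computing ofs + len - 1 until it has
proven the sum cannot wrap.  The sketch below is not part of the patch; it is a
minimal userspace illustration (the helper name would_overflow() is made up)
of why "len - 1 > U64_MAX - ofs" fires exactly when ofs + len - 1 would
overflow a u64, and why the zero-length case must be short-circuited first.

/*
 * Standalone illustration of the overflow check used in
 * rbd_get_num_segments().  Overflow happens iff ofs + (len - 1) > U64_MAX,
 * which rearranges to len - 1 > U64_MAX - ofs; both sides of that test are
 * themselves computed without wrapping once len == 0 has been ruled out.
 */
#include <stdio.h>
#include <stdint.h>

#define U64_MAX ((uint64_t) (~0ULL))

/* Returns 1 if ofs + len - 1 would overflow a u64, 0 otherwise. */
static int would_overflow(uint64_t ofs, uint64_t len)
{
	if (!len)			/* zero-length: nothing to add */
		return 0;
	return len - 1 > U64_MAX - ofs;
}

int main(void)
{
	/* End offset is exactly U64_MAX: still representable. */
	printf("%d\n", would_overflow(1, U64_MAX));	/* prints 0 */
	/* End offset would be U64_MAX + 1: wraps around. */
	printf("%d\n", would_overflow(2, U64_MAX));	/* prints 1 */
	/* Typical small request, well within range. */
	printf("%d\n", would_overflow(4096, 8192));	/* prints 0 */
	return 0;
}

Subtracting from U64_MAX rather than adding is the usual way to phrase such a
check: the comparison never produces an intermediate value larger than U64_MAX,
so the test itself cannot be defeated by the very wraparound it is detecting.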