author		Ilya Dryomov <idryomov@gmail.com>	2016-09-20 12:23:17 (GMT)
committer	Ilya Dryomov <idryomov@gmail.com>	2016-10-03 14:13:47 (GMT)
commit		80de19122866d0a65f741e7ff2d5d20842d22d6b (patch)
tree		e840143ecf05346a1c33ef4f1110ddf4677d8f46 /drivers/block/rbd.c
parent		0276dca6c1ecb9a665645ff573e70685a57759af (diff)
download	linux-80de19122866d0a65f741e7ff2d5d20842d22d6b.tar.xz
rbd: lock_on_read map option
Add a per-device option to acquire the exclusive lock on reads (in addition to writes and discards). The use case is iSCSI, where it will be used to prevent execution of stale writes after the implicit failover.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Tested-by: Mike Christie <mchristi@redhat.com>
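(Not part of the patch: a usage sketch.) Assuming the sysfs add interface documented in Documentation/ABI/testing/sysfs-bus-rbd ("<mon ip addr> <options> <pool name> <rbd image name> [<snap name>]"), the new option would be passed in the comma-separated options field that parse_rbd_opts_token() below consumes. Monitor address, pool, and image names here are placeholders; in practice the userspace rbd CLI builds this string (roughly "rbd map -o lock_on_read mypool/myimage"):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/rbd/add", "w");

	if (!f) {
		perror("fopen /sys/bus/rbd/add");
		return EXIT_FAILURE;
	}

	/*
	 * Options are comma-separated: libceph options (name=, secret=)
	 * followed by rbd options such as lock_on_read.  All values are
	 * placeholders and need a running cluster to actually map.
	 */
	if (fprintf(f, "192.168.0.1:6789 name=admin,lock_on_read rbd myimage") < 0)
		perror("write rbd add spec");

	if (fclose(f) == EOF)
		perror("fclose");
	return 0;
}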
Diffstat (limited to 'drivers/block/rbd.c')
-rw-r--r--	drivers/block/rbd.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 35fc1da..cc4c9f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -795,6 +795,7 @@ enum {
 	/* string args above */
 	Opt_read_only,
 	Opt_read_write,
+	Opt_lock_on_read,
 	Opt_err
 };
 
@@ -806,16 +807,19 @@ static match_table_t rbd_opts_tokens = {
 	{Opt_read_only, "ro"},		/* Alternate spelling */
 	{Opt_read_write, "read_write"},
 	{Opt_read_write, "rw"},		/* Alternate spelling */
+	{Opt_lock_on_read, "lock_on_read"},
 	{Opt_err, NULL}
 };
 
 struct rbd_options {
 	int	queue_depth;
 	bool	read_only;
+	bool	lock_on_read;
 };
 
 #define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
 #define RBD_READ_ONLY_DEFAULT	false
+#define RBD_LOCK_ON_READ_DEFAULT	false
 
 static int parse_rbd_opts_token(char *c, void *private)
 {
@@ -851,6 +855,9 @@ static int parse_rbd_opts_token(char *c, void *private)
 	case Opt_read_write:
 		rbd_opts->read_only = false;
 		break;
+	case Opt_lock_on_read:
+		rbd_opts->lock_on_read = true;
+		break;
 	default:
 		/* libceph prints "bad option" msg */
 		return -EINVAL;
@@ -4105,7 +4112,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 	u64 length = blk_rq_bytes(rq);
 	enum obj_operation_type op_type;
 	u64 mapping_size;
-	bool must_be_locked = false;
+	bool must_be_locked;
 	int result;
 
 	if (rq->cmd_type != REQ_TYPE_FS) {
@@ -4168,6 +4175,9 @@ static void rbd_queue_workfn(struct work_struct *work)
 		snapc = rbd_dev->header.snapc;
 		ceph_get_snap_context(snapc);
 		must_be_locked = rbd_is_lock_supported(rbd_dev);
+	} else {
+		must_be_locked = rbd_dev->opts->lock_on_read &&
+				 rbd_is_lock_supported(rbd_dev);
 	}
 	up_read(&rbd_dev->header_rwsem);
 
@@ -5757,6 +5767,7 @@ static int rbd_add_parse_args(const char *buf,
 
 	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
 	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
 
 	copts = ceph_parse_options(options, mon_addrs,
 				   mon_addrs + mon_addrs_size - 1,