author    Lars Ellenberg <lars.ellenberg@linbit.com>  2012-07-24 08:12:36 (GMT)
committer Philipp Reisner <philipp.reisner@linbit.com>  2012-11-08 15:58:37 (GMT)
commit    9a278a7906066a1b4f37fff9b5e27a92af0ca3ce (patch)
tree      8dd98edd53bc8e0c17f2681e4418b96ffb7af736 /drivers
parent    934722a2dbf87b43d39c787441e511157d5add94 (diff)
download  linux-9a278a7906066a1b4f37fff9b5e27a92af0ca3ce.tar.xz
drbd: allow read requests to be retried after force-detach
Sometimes, a lower level block device turns into a tar-pit, not completing requests at all, not even doing error completion.

We can force-detach from such a tar-pit block device, either by disk-timeout, or by drbdadm detach --force.

Queueing for retry only from the request destruction path (kref hit 0) makes it impossible to retry affected read requests from the peer, until the local IO completion happened, as the locally submitted bio holds a reference on the drbd request object.

If we can only complete READs when the local completion finally happens, we would not need to force-detach in the first place.

Instead, queue for retry where we otherwise had done the error completion.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
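For illustration only (not code from this patch): below is a minimal user-space C sketch of the refcounting change the message describes. Retry is queued as soon as the completion reference count reaches zero, instead of from the destructor, so a frozen local bio that still holds an object reference can no longer delay the retry. All names here (my_request, my_put_completion_ref, my_restart_request) are made up stand-ins for the DRBD counterparts.

	/* Minimal sketch: the request has two kinds of references.
	 * kref            -- object lifetime; last put frees the object.
	 * completion_ref  -- holders that block completion toward the
	 *                    upper layer.
	 * Old scheme: a postponed request was requeued only when kref hit
	 * zero, which a frozen local bio could block indefinitely.
	 * New scheme: requeue when completion_ref hits zero; the
	 * destructor only frees memory. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	struct my_request {
		int kref;           /* object lifetime references        */
		int completion_ref; /* references blocking completion    */
		bool postponed;     /* marked for retry (RQ_POSTPONED)   */
	};

	static void my_req_destroy(struct my_request *req)
	{
		/* destruction only frees; it never requeues */
		free(req);
	}

	static void my_kref_put(struct my_request *req)
	{
		if (--req->kref == 0)
			my_req_destroy(req);
	}

	static void my_restart_request(struct my_request *req)
	{
		/* stand-in for the retry worker hand-off; the worker
		 * later drops the kref it was handed */
		printf("request %p queued for retry\n", (void *)req);
	}

	/* analogous role to drbd_req_put_completion_ref() after the patch */
	static int my_put_completion_ref(struct my_request *req)
	{
		if (--req->completion_ref != 0)
			return 0;

		/* ...complete toward the upper layer here... */

		if (req->postponed) {
			/* queue for retry right where the error completion
			 * would have happened; do NOT wait for kref == 0 */
			my_restart_request(req);
			return 0;
		}
		return 1; /* caller drops one kref, possibly destroying req */
	}

	int main(void)
	{
		struct my_request *req = calloc(1, sizeof(*req));

		if (!req)
			return 1;
		req->kref = 2;           /* retry path + frozen local bio     */
		req->completion_ref = 1; /* only the completion path is left  */
		req->postponed = true;   /* force-detach marked it for retry  */

		if (my_put_completion_ref(req) == 1)
			my_kref_put(req);
		/* the retry was queued even though the frozen bio still
		 * holds a reference; both remaining holders drop theirs
		 * eventually */
		my_kref_put(req); /* retry worker's put               */
		my_kref_put(req); /* frozen bio finally completes     */
		return 0;
	}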
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_main.c  23
-rw-r--r--  drivers/block/drbd/drbd_req.c   19
-rw-r--r--  drivers/block/drbd/drbd_req.h    1
3 files changed, 29 insertions, 14 deletions
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index d07cb31..c0acd86 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2216,12 +2216,25 @@ static void do_retry(struct work_struct *ws)
struct drbd_conf *mdev = req->w.mdev;
struct bio *bio = req->master_bio;
unsigned long start_time = req->start_time;
-
- /* We have exclusive access to this request object.
- * If it had not been RQ_POSTPONED, the code path which queued
- * it here would have completed and freed it already.
+ bool expected;
+
+ expected =
+ expect(atomic_read(&req->completion_ref) == 0) &&
+ expect(req->rq_state & RQ_POSTPONED) &&
+ expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
+ (req->rq_state & RQ_LOCAL_ABORTED) != 0);
+
+ if (!expected)
+ dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
+ req, atomic_read(&req->completion_ref),
+ req->rq_state);
+
+ /* We still need to put one kref associated with the
+ * "completion_ref" going zero in the code path that queued it
+ * here. The request object may still be referenced by a
+ * frozen local req->private_bio, in case we force-detached.
*/
- mempool_free(req, drbd_request_mempool);
+ kref_put(&req->kref, drbd_req_destroy);
/* A single suspended or otherwise blocking device may stall
* all others as well. Fortunately, this code path is to
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f2ba43e..c45479a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -92,7 +92,7 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
return req;
}
-static void drbd_req_destroy(struct kref *kref)
+void drbd_req_destroy(struct kref *kref)
{
struct drbd_request *req = container_of(kref, struct drbd_request, kref);
struct drbd_conf *mdev = req->w.mdev;
@@ -152,10 +152,7 @@ static void drbd_req_destroy(struct kref *kref)
}
}
- if (s & RQ_POSTPONED)
- drbd_restart_request(req);
- else
- mempool_free(req, drbd_request_mempool);
+ mempool_free(req, drbd_request_mempool);
}
static void wake_all_senders(struct drbd_tconn *tconn) {
@@ -292,10 +289,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
m->error = ok ? 0 : (error ?: -EIO);
m->bio = req->master_bio;
req->master_bio = NULL;
- } else {
- /* Assert that this will be drbd_req_destroy()ed
- * with this very invokation. */
- D_ASSERT(atomic_read(&req->kref.refcount) == 1);
}
}
@@ -320,6 +313,14 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
/* else */
drbd_req_complete(req, m);
+
+ if (req->rq_state & RQ_POSTPONED) {
+ /* don't destroy the req object just yet,
+ * but queue it for retry */
+ drbd_restart_request(req);
+ return 0;
+ }
+
return 1;
}
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 90e5a1e..9611713 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -267,6 +267,7 @@ struct bio_and_error {
int error;
};
+extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,