author	David S. Miller <davem@davemloft.net>	2008-06-10 09:22:26 (GMT)
committer	David S. Miller <davem@davemloft.net>	2008-06-10 09:22:26 (GMT)
commit	65b53e4cc90e59936733b3b95b9451d2ca47528d (patch)
tree	29932718192962671c48c3fd1ea017a6112459e8 /block/blk-core.c
parent	788c0a53164c05c5ccdb1472474372b72ba74644 (diff)
parent	2e761e0532a784816e7e822dbaaece8c5d4be14d (diff)
download	linux-fsl-qoriq-65b53e4cc90e59936733b3b95b9451d2ca47528d.tar.xz
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/tg3.c
	drivers/net/wireless/rt2x00/rt2x00dev.c
	net/mac80211/ieee80211_i.h
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	37
1 files changed, 17 insertions, 20 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 6a9cc0d..1905aab 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -806,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
+		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-		if (!rq) {
-			struct io_context *ioc;
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
-
-			__generic_unplug_device(q);
-			spin_unlock_irq(q->queue_lock);
-			io_schedule();
+		__generic_unplug_device(q);
+		spin_unlock_irq(q->queue_lock);
+		io_schedule();
 
-			/*
-			 * After sleeping, we become a "batching" process and
-			 * will be able to allocate at least one request, and
-			 * up to a big batch of them for a small period time.
-			 * See ioc_batching, ioc_set_batching
-			 */
-			ioc = current_io_context(GFP_NOIO, q->node);
-			ioc_set_batching(q, ioc);
+		/*
+		 * After sleeping, we become a "batching" process and
+		 * will be able to allocate at least one request, and
+		 * up to a big batch of them for a small period time.
+		 * See ioc_batching, ioc_set_batching
+		 */
+		ioc = current_io_context(GFP_NOIO, q->node);
+		ioc_set_batching(q, ioc);
 
-			spin_lock_irq(q->queue_lock);
-		}
+		spin_lock_irq(q->queue_lock);
 		finish_wait(&rl->wait[rw], &wait);
-	}
+
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	};
 
 	return rq;
 }
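
For readability, a minimal sketch of how the get_request_wait() loop reads once this hunk is applied, assembled only from the lines shown above; declarations such as rq and rw live outside the hunk and are not repeated here:

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

		/* unplug the queue, drop the lock and sleep until woken */
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[rw], &wait);

		/* retry the allocation only after sleeping, replacing the old inner if (!rq) block */
		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	};

	return rq;

The net effect of the hunk is that the second get_request() call moves to the end of the loop body, so every iteration sleeps before retrying and the nested if (!rq) block disappears.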