From f660f6066716b700148f60dba3461e65efff3123 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Mon, 10 Oct 2016 15:10:51 +0300
Subject: softirq: Display IRQ_POLL for irq-poll statistics

This library was moved to the generic area and was renamed to irq-poll.
Hence, update the /proc/softirqs output accordingly.

Signed-off-by: Sagi Grimberg
Reviewed-by: Johannes Thumshirn
Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1bf81ef..744fa61 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
-	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
--
cgit v0.10.2

From b4a1278c78bc939b3e29c3ad21ceaa636b0ca8c8 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Thu, 20 Oct 2016 14:40:06 -0700
Subject: badblocks: badblocks_set/clear update unacked_exist

When badblocks_set acknowledges a range or badblocks_clear clears a
range, it's possible that all bad blocks end up acknowledged. We should
update unacked_exist if this occurs.

Signed-off-by: Shaohua Li
Reviewed-by: Tomasz Majchrzak
Tested-by: Tomasz Majchrzak
Signed-off-by: Jens Axboe

diff --git a/block/badblocks.c b/block/badblocks.c
index 6610e28..6ebcef2 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
 }
 EXPORT_SYMBOL_GPL(badblocks_check);
 
+static void badblocks_update_acked(struct badblocks *bb)
+{
+	u64 *p = bb->page;
+	int i;
+	bool unacked = false;
+
+	if (!bb->unacked_exist)
+		return;
+
+	for (i = 0; i < bb->count; i++) {
+		if (!BB_ACK(p[i])) {
+			unacked = true;
+			break;
+		}
+	}
+
+	if (!unacked)
+		bb->unacked_exist = 0;
+}
+
 /**
  * badblocks_set() - Add a range of bad blocks to the table.
  * @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
 	bb->changed = 1;
 	if (!acknowledged)
 		bb->unacked_exist = 1;
+	else
+		badblocks_update_acked(bb);
 	write_sequnlock_irqrestore(&bb->lock, flags);
 
 	return rv;
@@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
 		}
 	}
 
+	badblocks_update_acked(bb);
 	bb->changed = 1;
 out:
 	write_sequnlock_irq(&bb->lock);
--
cgit v0.10.2

From 423221d1745b53656db896bd34646d09d620c759 Mon Sep 17 00:00:00 2001
From: "John W. Linville"
Date: Mon, 24 Oct 2016 15:13:25 -0400
Subject: nbd: fix incorrect unlock of nbd->sock_lock in sock_shutdown

Commit 0eadf37afc250 ("nbd: allow block mq to deal with timeouts")
changed the normal usage of nbd->sock_lock to use spin_lock/spin_unlock
rather than the *_irq variants, but it missed this unlock in an error
path.

Found by Coverity, CID 1373871.

Signed-off-by: John W. Linville
Cc: Josef Bacik
Cc: Jens Axboe
Cc: Markus Pargmann
Fixes: 0eadf37afc250 ("nbd: allow block mq to deal with timeouts")
Signed-off-by: Jens Axboe

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b5..19a16b2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
 	spin_lock(&nbd->sock_lock);
 
 	if (!nbd->sock) {
-		spin_unlock_irq(&nbd->sock_lock);
+		spin_unlock(&nbd->sock_lock);
 		return;
 	}
 
--
cgit v0.10.2

From 94d7dea448fae6cbb83395323c1d2fd7f19dc388 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Wed, 26 Oct 2016 16:57:15 +0800
Subject: block: flush: fix IO hang in case of flood fua req

This patch fixes an issue reported by Kent, which can be triggered in
bcachefs over a SATA disk. It is actually a generic issue in the
interaction between block flush and blk-tag.

Cc: Christoph Hellwig
Reported-by: Kent Overstreet
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68..3c882cb 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -343,6 +343,34 @@ static void flush_data_end_io(struct request *rq, int error)
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
 	/*
+	 * Update q->in_flight[] here to make this tag usable
+	 * early, because in blk_queue_start_tag(),
+	 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
+	 * reserve tags for sync I/O.
+	 *
+	 * More importantly, this avoids the following I/O
+	 * deadlock:
+	 *
+	 * - suppose there are 40 FUA requests coming to the flush
+	 *   queue and the queue depth is 31
+	 * - 30 rqs are scheduled, then blk_queue_start_tag() can't
+	 *   allocate a tag for async I/O any more
+	 * - all 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+	 *   and flush_data_end_io() is called
+	 * - the other rqs still can't go ahead without updating
+	 *   q->in_flight[BLK_RW_ASYNC] here; meanwhile these rqs
+	 *   are held in the flush data queue and make no progress
+	 *   handling the post-flush rq
+	 * - only after the post-flush rq is handled can all these
+	 *   rqs be completed
+	 */
+
+	elv_completed_request(q, rq);
+
+	/* avoid double accounting */
+	rq->cmd_flags &= ~REQ_STARTED;
+
+	/*
 	 * After populating an empty queue, kick it to avoid stall. Read
 	 * the comment in flush_end_io().
 	 */
--
cgit v0.10.2

From 7fe311302f7d52601cd799ad508a6f92cb3d748d Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 27 Oct 2016 09:49:19 -0600
Subject: blk-mq: update hardware and software queues for sleeping alloc

If we end up sleeping due to running out of requests, we should update
the hardware and software queues in the map ctx structure. Otherwise we
could end up having rq->mq_ctx point to the pre-sleep context, and risk
corrupting ctx->rq_list since we'll be grabbing the wrong lock when
inserting the request.

Reported-by: Dave Jones
Reported-by: Chris Mason
Tested-by: Chris Mason
Fixes: 63581af3f31e ("blk-mq: remove non-blocking pass in blk_mq_map_request")
Signed-off-by: Jens Axboe

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ddc2eed..f3d27a6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 
-	hctx->queued++;
-	data->hctx = hctx;
-	data->ctx = ctx;
+	data->hctx = alloc_data.hctx;
+	data->ctx = alloc_data.ctx;
+	data->hctx->queued++;
 	return rq;
 }
--
cgit v0.10.2
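
Editor's note on the badblocks patch: the badblocks table packs each bad
range into a u64, and badblocks_update_acked() simply scans for any entry
whose ack bit is still clear. Below is a minimal userspace sketch of that
scan, assuming the encoding from include/linux/badblocks.h (bit 63 = ack
flag, bits 9..62 = start sector, low 9 bits = length - 1); the *_demo
names are hypothetical stand-ins for the kernel structures, not kernel
API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BB_ACK_MASK	(1ULL << 63)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) \
	(((uint64_t)(a) << 9) | ((uint64_t)((l) - 1)) | \
	 ((uint64_t)(!!(ack)) << 63))

struct badblocks_demo {
	uint64_t *page;		/* packed entries, as in struct badblocks */
	int count;		/* number of valid entries */
	int unacked_exist;	/* cached "any unacked entry?" flag */
};

/* Clear unacked_exist once no entry remains unacknowledged. */
static void badblocks_update_acked(struct badblocks_demo *bb)
{
	bool unacked = false;
	int i;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count; i++) {
		if (!BB_ACK(bb->page[i])) {
			unacked = true;
			break;
		}
	}

	if (!unacked)
		bb->unacked_exist = 0;
}

int main(void)
{
	uint64_t table[] = {
		BB_MAKE(1024, 8, 1),	/* acknowledged range */
		BB_MAKE(4096, 16, 1),	/* acknowledged range */
	};
	struct badblocks_demo bb = { table, 2, 1 };

	badblocks_update_acked(&bb);
	printf("unacked_exist = %d\n", bb.unacked_exist);	/* prints 0 */
	return 0;
}

The early break keeps the scan cheap whenever an unacknowledged entry is
found quickly, and the unacked_exist guard at the top lets callers skip
the scan entirely once the flag has been cleared.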
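
Editor's note on the blk-mq patch: the fix is an instance of a general
stale-snapshot bug. A blocking allocator may migrate the caller to a
different hardware/software queue pair and record the new pair in
alloc_data, so the caller must read back alloc_data.hctx and
alloc_data.ctx instead of its pre-sleep locals. A minimal userspace
sketch of that bug class follows; the names (alloc_ctx, alloc_request)
are hypothetical and only illustrate the pattern.

#include <stdio.h>

struct alloc_ctx {
	int hctx_id;	/* hardware-queue id chosen by the allocator */
	int ctx_id;	/* software-queue id chosen by the allocator */
};

/* May "sleep" and remap the caller to different queues before retrying. */
static int alloc_request(struct alloc_ctx *ac)
{
	ac->hctx_id = 3;	/* allocator moved us while we slept */
	ac->ctx_id = 7;
	return 42;		/* the allocated request tag */
}

int main(void)
{
	struct alloc_ctx ac = { .hctx_id = 0, .ctx_id = 0 };
	int hctx_id = ac.hctx_id;	/* pre-sleep snapshot: stale! */
	int ctx_id = ac.ctx_id;

	alloc_request(&ac);

	/* Buggy pattern: trusts the pre-sleep snapshot (0/0). */
	printf("stale:   hctx=%d ctx=%d\n", hctx_id, ctx_id);
	/* Fixed pattern, as in the patch: re-read the post-call values. */
	printf("current: hctx=%d ctx=%d\n", ac.hctx_id, ac.ctx_id);
	return 0;
}

In the kernel case the stale snapshot is worse than a wrong id: inserting
the request under the old ctx means taking the wrong ctx lock, which is
how ctx->rq_list could be corrupted.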