Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c         |   2
-rw-r--r--  block/blk-core.c           |  27
-rw-r--r--  block/blk-exec.c           |   2
-rw-r--r--  block/blk-flush.c          |  15
-rw-r--r--  block/blk-ioc.c            |   2
-rw-r--r--  block/blk-iopoll.c         |   3
-rw-r--r--  block/blk-mq-cpu.c         |  14
-rw-r--r--  block/blk-mq-cpumap.c      |  10
-rw-r--r--  block/blk-mq-sysfs.c       |  31
-rw-r--r--  block/blk-mq.c             | 172
-rw-r--r--  block/blk-mq.h             |   3
-rw-r--r--  block/blk-softirq.c        |  19
-rw-r--r--  block/cfq-iosched.c        |   8
-rw-r--r--  block/deadline-iosched.c   |   8
-rw-r--r--  block/partitions/atari.h   |   4
-rw-r--r--  block/partitions/efi.h     |   9
-rw-r--r--  block/partitions/karma.c   |   3
17 files changed, 173 insertions(+), 159 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e491d9..b6e95b5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* under queue_lock. If it's not pointing to @blkg now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
diff --git a/block/blk-core.c b/block/blk-core.c
index 4db2b32..34d7c19 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
- uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
- if (!uninit_q->flush_rq)
- goto out_cleanup_queue;
-
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- goto out_free_flush_rq;
- return q;
+ blk_cleanup_queue(uninit_q);
-out_free_flush_rq:
- kfree(uninit_q->flush_rq);
-out_cleanup_queue:
- blk_cleanup_queue(uninit_q);
- return NULL;
+ return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (!q)
return NULL;
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
return NULL;
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ goto fail;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
@@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
- return NULL;
+ goto fail;
}
mutex_unlock(&q->sysfs_lock);
return q;
+
+fail:
+ kfree(q->flush_rq);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -2354,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613b..dbf4502 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be resued after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, at_head, true);
+ blk_mq_insert_request(rq, at_head, true, false);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b69..43e6b47 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,17 +137,20 @@ static void mq_flush_run(struct work_struct *work)
rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
- blk_mq_run_request(rq, true, false);
+ blk_mq_insert_request(rq, false, true, false);
}
-static bool blk_flush_queue_rq(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
INIT_WORK(&rq->mq_flush_work, mq_flush_run);
kblockd_schedule_work(rq->q, &rq->mq_flush_work);
return false;
} else {
- list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ if (add_front)
+ list_add(&rq->queuelist, &rq->q->queue_head);
+ else
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
return true;
}
}
@@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- queued = blk_flush_queue_rq(rq);
+ queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq->rq_disk = first_rq->rq_disk;
q->flush_rq->end_io = flush_end_io;
- return blk_flush_queue_rq(q->flush_rq);
+ return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -411,7 +414,7 @@ void blk_insert_flush(struct request *rq)
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
- blk_mq_run_request(rq, false, true);
+ blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 242df01..1a27f45 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* under queue_lock. If it's not pointing to @icq now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(ioc->icq_hint) == icq)
+ if (rcu_access_pointer(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
ioc_exit_icq(icq);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf5..c11d24e 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
#include "blk.h"
-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
static unsigned int blk_iopoll_budget __read_mostly = 256;
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146bef..136ef86 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index f872127..0979213 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -9,15 +9,6 @@
#include "blk.h"
#include "blk-mq.h"
-static void show_map(unsigned int *map, unsigned int nr)
-{
- int i;
-
- pr_info("blk-mq: CPU -> queue map\n");
- for_each_online_cpu(i)
- pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
-}
-
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
const int cpu)
{
@@ -85,7 +76,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
map[i] = map[first_sibling];
}
- show_map(map, nr_cpus);
free_cpumask_var(cpus);
return 0;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b91ce75..b0ba264 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -244,6 +244,32 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
+static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ unsigned int i, queue_num, first = 1;
+ ssize_t ret = 0;
+
+ blk_mq_disable_hotplug();
+
+ for_each_online_cpu(i) {
+ queue_num = hctx->queue->mq_map[i];
+ if (queue_num != hctx->queue_num)
+ continue;
+
+ if (first)
+ ret += sprintf(ret + page, "%u", i);
+ else
+ ret += sprintf(ret + page, ", %u", i);
+
+ first = 0;
+ }
+
+ blk_mq_enable_hotplug();
+
+ ret += sprintf(ret + page, "\n");
+ return ret;
+}
+
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -294,6 +320,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
+ .attr = {.name = "cpu_list", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_cpus_show,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -302,6 +332,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr,
+ &blk_mq_hw_sysfs_cpus.attr,
NULL,
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd1..b1bcc61 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
set_bit(ctx->index_hw, hctx->ctx_map);
}
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
- bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
- gfp_t gfp, bool reserved)
-{
- return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
int rw, gfp_t gfp,
bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
__blk_mq_free_request(hctx, ctx, rq);
}
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
-
- if (unlikely(rq->cmd_flags & REQ_QUIET))
- set_bit(BIO_QUIET, &bio->bi_flags);
-
- /* don't actually finish bio if it's part of flush sequence */
- if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
- bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
{
- struct bio *bio = rq->bio;
- unsigned int bytes = 0;
-
- trace_block_rq_complete(rq->q, rq);
-
- while (bio) {
- struct bio *next = bio->bi_next;
-
- bio->bi_next = NULL;
- bytes += bio->bi_iter.bi_size;
- blk_mq_bio_endio(rq, bio, error);
- bio = next;
- }
-
- blk_account_io_completion(rq, bytes);
+ if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+ return true;
blk_account_io_done(rq);
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
+ return false;
}
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
static void __blk_mq_complete_request_remote(void *data)
{
@@ -353,7 +320,7 @@ void __blk_mq_complete_request(struct request *rq)
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
- __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
rq->q->softirq_done_fn(rq);
}
@@ -547,7 +514,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
LIST_HEAD(rq_list);
int bit, queued;
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
hctx->run++;
@@ -636,7 +603,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
if (!async)
@@ -656,7 +623,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
queue_for_each_hw_ctx(q, hctx, i) {
if ((!blk_mq_hctx_has_pending(hctx) &&
list_empty_careful(&hctx->dispatch)) ||
- test_bit(BLK_MQ_S_STOPPED, &hctx->flags))
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
blk_mq_run_hw_queue(hctx, async);
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
blk_mq_add_timer(rq);
}
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+ bool async)
{
+ struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+ current_ctx = blk_mq_get_ctx(q);
+ if (!cpu_online(ctx->cpu))
+ rq->mq_ctx = ctx = current_ctx;
- ctx = rq->mq_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+ !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
blk_insert_flush(rq);
} else {
- current_ctx = blk_mq_get_ctx(q);
-
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq->mq_ctx = ctx;
- }
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
-
- blk_mq_put_ctx(current_ctx);
- }
-
- if (run_queue)
- __blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
- struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
-
- current_ctx = blk_mq_get_ctx(q);
-
- ctx = rq->mq_ctx;
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- rq->mq_ctx = ctx;
}
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- /* ctx->cpu might be offline */
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, false);
- spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ if (is_sync)
+ rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
@@ -1058,8 +994,46 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
blk_mq_put_ctx(ctx);
}
-static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < hctx->queue_depth; i++) {
+ struct request *rq = hctx->rqs[i];
+
+ ret = init(data, hctx, rq, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int blk_mq_init_commands(struct request_queue *q,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ int ret = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ ret = blk_mq_init_hw_commands(hctx, init, data);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1068,12 +1042,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
for (i = 0; i < hctx->queue_depth; i++) {
struct request *rq = hctx->rqs[i];
- init(data, hctx, rq, i);
+ free(data, hctx, rq, i);
}
}
-void blk_mq_init_commands(struct request_queue *q,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+void blk_mq_free_commands(struct request_queue *q,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1081,9 +1055,9 @@ void blk_mq_init_commands(struct request_queue *q,
unsigned int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_init_hw_commands(hctx, init, data);
+ blk_mq_free_hw_commands(hctx, free, data);
}
-EXPORT_SYMBOL(blk_mq_init_commands);
+EXPORT_SYMBOL(blk_mq_free_commands);
static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
{
@@ -1494,6 +1468,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+void blk_mq_disable_hotplug(void)
+{
+ mutex_lock(&all_q_mutex);
+}
+
+void blk_mq_enable_hotplug(void)
+{
+ mutex_unlock(&all_q_mutex);
+}
+
static int __init blk_mq_init(void)
{
blk_mq_cpu_init();
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035c..ebbe6ba 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
};
void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
@@ -40,6 +39,8 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
+void blk_mq_enable_hotplug(void);
+void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1..ebd6b6f 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
while (!list_empty(&local_list)) {
struct request *rq;
- rq = list_entry(local_list.next, struct request, csd.list);
- list_del_init(&rq->csd.list);
+ rq = list_entry(local_list.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
rq->q->softirq_done_fn(rq);
}
}
@@ -45,9 +45,14 @@ static void trigger_softirq(void *data)
local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->csd.list, list);
+ /*
+ * We reuse queuelist for a list of requests to process. Since the
+ * queuelist is used by the block layer only for requests waiting to be
+ * submitted to the device it is unused now.
+ */
+ list_add_tail(&rq->queuelist, list);
- if (list->next == &rq->csd.list)
+ if (list->next == &rq->queuelist)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -65,7 +70,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
+ smp_call_function_single_async(cpu, data);
return 0;
}
@@ -136,7 +141,7 @@ void __blk_complete_request(struct request *req)
struct list_head *list;
do_local:
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&req->csd.list, list);
+ list_add_tail(&req->queuelist, list);
/*
* if the list only contains our just added request,
@@ -144,7 +149,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->csd.list)
+ if (list->next == &req->queuelist)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 744833b..5873e4a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2367,10 +2367,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ time_before(next->fifo_time, rq->fifo_time) &&
cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ rq->fifo_time = next->fifo_time;
}
if (cfqq->next_rq == next)
@@ -2814,7 +2814,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
return NULL;
rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_before(jiffies, rq_fifo_time(rq)))
+ if (time_before(jiffies, rq->fifo_time))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3927,7 +3927,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq));
- rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9ef6640..a753df2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* set expire time and add to fifo list
*/
- rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
* and move into next position (next will be deleted) in fifo
*/
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ if (time_before(next->fifo_time, req->fifo_time)) {
list_move(&req->queuelist, &next->queuelist);
- rq_set_fifo_time(req, rq_fifo_time(next));
+ req->fifo_time = next->fifo_time;
}
}
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq->fifo_time))
return 1;
return 0;
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
index fe2d32a..f2ec43b 100644
--- a/block/partitions/atari.h
+++ b/block/partitions/atari.h
@@ -11,6 +11,8 @@
* by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
*/
+#include <linux/compiler.h>
+
struct partition_info
{
u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
u32 bsl_st; /* start of bad sector list */
u32 bsl_cnt; /* length of bad sector list */
u16 checksum; /* checksum for bootable disks */
-} __attribute__((__packed__));
+} __packed;
int atari_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index 4efcafb..abd0b192 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -32,6 +32,7 @@
#include <linux/major.h>
#include <linux/string.h>
#include <linux/efi.h>
+#include <linux/compiler.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xEF
@@ -87,13 +88,13 @@ typedef struct _gpt_header {
*
* uint8_t reserved2[ BlockSize - 92 ];
*/
-} __attribute__ ((packed)) gpt_header;
+} __packed gpt_header;
typedef struct _gpt_entry_attributes {
u64 required_to_function:1;
u64 reserved:47;
u64 type_guid_specific:16;
-} __attribute__ ((packed)) gpt_entry_attributes;
+} __packed gpt_entry_attributes;
typedef struct _gpt_entry {
efi_guid_t partition_type_guid;
@@ -102,7 +103,7 @@ typedef struct _gpt_entry {
__le64 ending_lba;
gpt_entry_attributes attributes;
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
-} __attribute__ ((packed)) gpt_entry;
+} __packed gpt_entry;
typedef struct _gpt_mbr_record {
u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
@@ -124,7 +125,7 @@ typedef struct _legacy_mbr {
__le16 unknown;
gpt_mbr_record partition_record[4];
__le16 signature;
-} __attribute__ ((packed)) legacy_mbr;
+} __packed legacy_mbr;
/* Functions */
extern int efi_partition(struct parsed_partitions *state);
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
index 0ea1931..9721fa5 100644
--- a/block/partitions/karma.c
+++ b/block/partitions/karma.c
@@ -8,6 +8,7 @@
#include "check.h"
#include "karma.h"
+#include <linux/compiler.h>
int karma_partition(struct parsed_partitions *state)
{
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
} d_partitions[2];
u8 d_blank[208];
__le16 d_magic;
- } __attribute__((packed)) *label;
+ } __packed *label;
struct d_partition *p;
data = read_part_sector(state, 0, &sect);