author    Dave Airlie <airlied@redhat.com>  2015-04-20 01:32:26 (GMT)
committer Dave Airlie <airlied@redhat.com>  2015-04-20 03:05:20 (GMT)
commit    2c33ce009ca2389dbf0535d0672214d09738e35e (patch)
tree      6186a6458c3c160385d794a23eaf07c786a9e61b /block
parent    cec32a47010647e8b0603726ebb75b990a4057a4 (diff)
parent    09d51602cf84a1264946711dd4ea0dddbac599a1 (diff)
Merge Linus master into drm-next

The merge is clean, but the arm build fails afterwards due to API changes in the regulator tree. I've included the patch in the merge to fix the build.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c     | 19
-rw-r--r--  block/blk-map.c      |  6
-rw-r--r--  block/blk-mq-sysfs.c |  1
-rw-r--r--  block/blk-mq.c       | 75
-rw-r--r--  block/scsi_ioctl.c   | 12
5 files changed, 69 insertions(+), 44 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 794c3e7..fd154b9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -557,6 +557,18 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);
+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+ int nid = (int)(long)data;
+ return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+ kmem_cache_free(request_cachep, element);
+}
+
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask)
{
@@ -569,9 +581,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, request_cachep,
- gfp_mask, q->node);
+ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+ free_request_struct,
+ (void *)(long)q->node, gfp_mask,
+ q->node);
if (!rl->rq_pool)
return -ENOMEM;
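
The blk-core.c hunks swap the generic mempool_alloc_slab/mempool_free_slab
helpers for callbacks that route each allocation through kmem_cache_alloc_node(),
so request structures come from memory local to the queue's NUMA node; the node
id travels through the opaque pool_data pointer. A minimal sketch of the same
pattern against the mempool callback contract in <linux/mempool.h> (my_cachep
and make_pool are hypothetical names, not part of the patch):

	#include <linux/mempool.h>
	#include <linux/slab.h>

	static struct kmem_cache *my_cachep;	/* hypothetical slab cache */

	/* mempool_alloc_t: pool_data carries the NUMA node id */
	static void *my_alloc(gfp_t gfp_mask, void *pool_data)
	{
		int nid = (int)(long)pool_data;
		return kmem_cache_alloc_node(my_cachep, gfp_mask, nid);
	}

	/* mempool_free_t: pool_data is unused here */
	static void my_free(void *element, void *pool_data)
	{
		kmem_cache_free(my_cachep, element);
	}

	static mempool_t *make_pool(int node)
	{
		/* 16 preallocated elements, all taken from `node` */
		return mempool_create_node(16, my_alloc, my_free,
					   (void *)(long)node, GFP_KERNEL, node);
	}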
diff --git a/block/blk-map.c b/block/blk-map.c
index b8d2725..da310a1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -124,10 +124,10 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
{
struct iovec iov;
struct iov_iter i;
+ int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
- iov.iov_base = ubuf;
- iov.iov_len = len;
- iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
+ if (unlikely(ret < 0))
+ return ret;
return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
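
The blk-map.c change folds the open-coded iovec and iov_iter setup into
import_single_range(), which also validates the user pointer before the iter
is built. A hedged usage sketch (map_one_buffer is a hypothetical caller, not
part of the patch):

	#include <linux/blkdev.h>
	#include <linux/uio.h>

	static int map_one_buffer(struct request_queue *q, struct request *rq,
				  void __user *ubuf, unsigned long len,
				  gfp_t gfp_mask)
	{
		struct iovec iov;
		struct iov_iter i;
		/* -EFAULT if [ubuf, ubuf + len) is not valid user memory */
		int ret = import_single_range(rq_data_dir(rq), ubuf, len,
					      &iov, &i);

		if (unlikely(ret < 0))
			return ret;

		return blk_rq_map_user_iov(q, rq, NULL, &i, gfp_mask);
	}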
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 1630a20..b79685e 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)
return 0;
}
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);
void blk_mq_sysfs_unregister(struct request_queue *q)
{
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b7b8933..ade8a2d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
/*
* Check if any of the ctx's have pending work in this hardware queue
@@ -42,7 +41,7 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
unsigned int i;
- for (i = 0; i < hctx->ctx_map.map_size; i++)
+ for (i = 0; i < hctx->ctx_map.size; i++)
if (hctx->ctx_map.map[i].word)
return true;
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
{
while (true) {
int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
if (percpu_ref_tryget_live(&q->mq_usage_counter))
return 0;
+ if (!(gfp & __GFP_WAIT))
+ return -EBUSY;
+
ret = wait_event_interruptible(q->mq_freeze_wq,
!q->mq_freeze_depth || blk_queue_dying(q));
if (blk_queue_dying(q))
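
With the new gfp argument, blk_mq_queue_enter() only sleeps on mq_freeze_wq
when the caller's mask allows waiting; a non-waiting allocation now fails fast
with -EBUSY while the queue is frozen. A sketch of what this buys a caller
(an assumption about usage, not part of the patch):

	#include <linux/blk-mq.h>
	#include <linux/err.h>

	static struct request *try_get_request(struct request_queue *q)
	{
		/* GFP_ATOMIC lacks __GFP_WAIT: cannot block on a frozen queue */
		struct request *rq = blk_mq_alloc_request(q, READ,
							  GFP_ATOMIC, false);

		if (IS_ERR(rq))
			return NULL;	/* -EBUSY if frozen: back off, retry later */
		return rq;
	}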
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
if (freeze) {
percpu_ref_kill(&q->mq_usage_counter);
- blk_mq_run_queues(q);
+ blk_mq_run_hw_queues(q, false);
}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
struct blk_mq_alloc_data alloc_data;
int ret;
- ret = blk_mq_queue_enter(q);
+ ret = blk_mq_queue_enter(q, gfp);
if (ret)
return ERR_PTR(ret);
@@ -728,7 +730,7 @@ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
struct blk_mq_ctx *ctx;
int i;
- for (i = 0; i < hctx->ctx_map.map_size; i++) {
+ for (i = 0; i < hctx->ctx_map.size; i++) {
struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
unsigned int off, bit;
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
&hctx->run_work, 0);
}
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
- blk_mq_run_hw_queue(hctx, false);
+ blk_mq_run_hw_queue(hctx, async);
}
}
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
int rw = bio_data_dir(bio);
struct blk_mq_alloc_data alloc_data;
- if (unlikely(blk_mq_queue_enter(q))) {
+ if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
bio_endio(bio, -EIO);
return NULL;
}
@@ -1457,7 +1460,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1479,8 +1482,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
left -= to_do * rq_size;
for (j = 0; j < to_do; j++) {
tags->rqs[i] = p;
- tags->rqs[i]->atomic_flags = 0;
- tags->rqs[i]->cmd_flags = 0;
if (set->ops->init_request) {
if (set->ops->init_request(set->driver_data,
tags->rqs[i], hctx_idx, i,
@@ -1519,8 +1520,6 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
if (!bitmap->map)
return -ENOMEM;
- bitmap->map_size = num_maps;
-
total = nr_cpu_ids;
for (i = 0; i < num_maps; i++) {
bitmap->map[i].depth = min(total, bitmap->bits_per_word);
@@ -1761,8 +1760,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
continue;
hctx = q->mq_ops->map_queue(q, i);
- cpumask_set_cpu(i, hctx->cpumask);
- hctx->nr_ctx++;
/*
* Set local node, IFF we have more than one hw queue. If
@@ -1799,6 +1796,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
queue_for_each_hw_ctx(q, hctx, i) {
+ struct blk_mq_ctxmap *map = &hctx->ctx_map;
+
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
@@ -1815,6 +1814,13 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
/*
+ * Set the map size to the number of mapped software queues.
+ * This is more accurate and more efficient than looping
+ * over all possibly mapped software queues.
+ */
+ map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
+
+ /*
* Initialize batch roundrobin counts
*/
hctx->next_cpu = cpumask_first(hctx->cpumask);
@@ -1891,9 +1897,25 @@ void blk_mq_release(struct request_queue *q)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
+ struct request_queue *uninit_q, *q;
+
+ uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+ if (!uninit_q)
+ return ERR_PTR(-ENOMEM);
+
+ q = blk_mq_init_allocated_queue(set, uninit_q);
+ if (IS_ERR(q))
+ blk_cleanup_queue(uninit_q);
+
+ return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q)
+{
struct blk_mq_hw_ctx **hctxs;
struct blk_mq_ctx __percpu *ctx;
- struct request_queue *q;
unsigned int *map;
int i;
@@ -1928,20 +1950,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
hctxs[i]->queue_num = i;
}
- q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
- if (!q)
- goto err_hctxs;
-
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
* See blk_register_queue() for details.
*/
if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
- goto err_mq_usage;
+ goto err_hctxs;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
- blk_queue_rq_timeout(q, 30000);
+ blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
q->nr_queues = nr_cpu_ids;
q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1985,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
else
blk_queue_make_request(q, blk_sq_make_request);
- if (set->timeout)
- blk_queue_rq_timeout(q, set->timeout);
-
/*
* Do this after blk_queue_make_request() overrides it...
*/
@@ -1981,7 +1996,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
if (blk_mq_init_hw_queues(q, set))
- goto err_mq_usage;
+ goto err_hctxs;
mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2008,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q;
-err_mq_usage:
- blk_cleanup_queue(q);
err_hctxs:
kfree(map);
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2022,7 @@ err_percpu:
free_percpu(ctx);
return ERR_PTR(-ENOMEM);
}
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
void blk_mq_free_queue(struct request_queue *q)
{
@@ -2161,7 +2174,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+ if (!set->ops->queue_rq || !set->ops->map_queue)
return -EINVAL;
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
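
The largest blk-mq.c change splits queue construction in two:
blk_mq_init_queue() now only allocates a bare queue and hands it to the new
blk_mq_init_allocated_queue(), so a driver that wants to allocate the
request_queue itself (for instance on a specific NUMA node) can call the two
halves directly. A hedged sketch of that driver-side pattern (my_make_queue
and my_set are hypothetical names):

	#include <linux/blk-mq.h>
	#include <linux/err.h>

	static struct request_queue *my_make_queue(struct blk_mq_tag_set *my_set)
	{
		struct request_queue *uninit_q, *q;

		uninit_q = blk_alloc_queue_node(GFP_KERNEL, my_set->numa_node);
		if (!uninit_q)
			return ERR_PTR(-ENOMEM);

		q = blk_mq_init_allocated_queue(my_set, uninit_q);
		if (IS_ERR(q))
			blk_cleanup_queue(uninit_q);	/* unwind the bare queue */

		return q;
	}

Two side effects of the split are visible above: the default 30 second request
timeout and set->timeout collapse into a single blk_queue_rq_timeout() call,
and the err_mq_usage unwind path folds into err_hctxs because the queue is no
longer allocated (and thus never cleaned up) inside this function.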
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e1f71c3..55b6f15 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -335,16 +335,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
struct iov_iter i;
struct iovec *iov = NULL;
- ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
- 0, NULL, &iov);
- if (ret < 0) {
- kfree(iov);
+ ret = import_iovec(rq_data_dir(rq),
+ hdr->dxferp, hdr->iovec_count,
+ 0, &iov, &i);
+ if (ret < 0)
goto out_free_cdb;
- }
/* SG_IO howto says that the shorter of the two wins */
- iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
- min_t(unsigned, ret, hdr->dxfer_len));
+ iov_iter_truncate(&i, hdr->dxfer_len);
ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
kfree(iov);
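
The scsi_ioctl.c hunk replaces rw_copy_check_uvector() plus a manual
iov_iter_init() with import_iovec(), which copies and validates the user iovec
array in one step and frees its own allocation on failure; that is why the old
error-path kfree(iov) could be dropped. The "shorter of the two wins" rule
becomes a plain iov_iter_truncate(). A hedged sketch of the idiom
(import_user_iov is a hypothetical wrapper, not part of the patch):

	#include <linux/slab.h>
	#include <linux/uio.h>

	static ssize_t import_user_iov(int dir, const struct iovec __user *uvec,
				       unsigned nr_segs, size_t max_len,
				       struct iovec **iov, struct iov_iter *i)
	{
		ssize_t ret;

		*iov = NULL;
		ret = import_iovec(dir, uvec, nr_segs, 0, iov, i);
		if (ret < 0)
			return ret;	/* import_iovec() already cleaned up */

		iov_iter_truncate(i, max_len);	/* shorter of the two wins */
		return ret;	/* byte count; caller kfree()s *iov when done */
	}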