Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig          |  3
-rw-r--r--  block/Kconfig.iosched  |  2
-rw-r--r--  block/blk-barrier.c    |  1
-rw-r--r--  block/blk-cgroup.c     | 54
-rw-r--r--  block/blk-cgroup.h     | 10
-rw-r--r--  block/blk-integrity.c  |  1
-rw-r--r--  block/blk-ioc.c        |  1
-rw-r--r--  block/blk-settings.c   | 12
-rw-r--r--  block/blk-sysfs.c      | 26
-rw-r--r--  block/blk-tag.c        |  1
-rw-r--r--  block/blk-timeout.c    | 12
-rw-r--r--  block/bsg.c            |  3
-rw-r--r--  block/cfq-iosched.c    | 42
-rw-r--r--  block/compat_ioctl.c   |  1
-rw-r--r--  block/elevator.c       |  2
-rw-r--r--  block/ioctl.c          |  1
-rw-r--r--  block/noop-iosched.c   |  1
17 files changed, 128 insertions(+), 45 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index e20fbde..f9e89f4 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY
Protection. If in doubt, say N.
config BLK_CGROUP
- bool
+ tristate "Block cgroup support"
depends on CGROUPS
+ depends on CFQ_GROUP_IOSCHED
default n
---help---
Generic block IO controller cgroup interface. This is the common
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index b71abfb..fc71cf0 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,6 +23,7 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
+ select BLK_CGROUP if CFQ_GROUP_IOSCHED
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -35,7 +36,6 @@ config IOSCHED_CFQ
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && CGROUPS
- select BLK_CGROUP
default n
---help---
Enable group IO scheduling in CFQ.
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 8618d89..6d88544 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/gfp.h>
#include "blk.h"
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c85d74c..5fe03de 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,6 +15,7 @@
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include "blk-cgroup.h"
static DEFINE_SPINLOCK(blkio_list_lock);
@@ -23,6 +24,31 @@ static LIST_HEAD(blkio_list);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
+ struct cgroup *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+ struct task_struct *, bool);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+ struct cgroup *, struct task_struct *, bool);
+static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
+static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
+
+struct cgroup_subsys blkio_subsys = {
+ .name = "blkio",
+ .create = blkiocg_create,
+ .can_attach = blkiocg_can_attach,
+ .attach = blkiocg_attach,
+ .destroy = blkiocg_destroy,
+ .populate = blkiocg_populate,
+#ifdef CONFIG_BLK_CGROUP
+ /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
+ .subsys_id = blkio_subsys_id,
+#endif
+ .use_id = 1,
+ .module = THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(blkio_subsys);
+
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -253,7 +279,8 @@ remove_entry:
done:
free_css_id(&blkio_subsys, &blkcg->css);
rcu_read_unlock();
- kfree(blkcg);
+ if (blkcg != &blkio_root_cgroup)
+ kfree(blkcg);
}
static struct cgroup_subsys_state *
@@ -319,17 +346,6 @@ static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
task_unlock(tsk);
}
-struct cgroup_subsys blkio_subsys = {
- .name = "blkio",
- .create = blkiocg_create,
- .can_attach = blkiocg_can_attach,
- .attach = blkiocg_attach,
- .destroy = blkiocg_destroy,
- .populate = blkiocg_populate,
- .subsys_id = blkio_subsys_id,
- .use_id = 1,
-};
-
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
spin_lock(&blkio_list_lock);
@@ -345,3 +361,17 @@ void blkio_policy_unregister(struct blkio_policy_type *blkiop)
spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+
+static int __init init_cgroup_blkio(void)
+{
+ return cgroup_load_subsys(&blkio_subsys);
+}
+
+static void __exit exit_cgroup_blkio(void)
+{
+ cgroup_unload_subsys(&blkio_subsys);
+}
+
+module_init(init_cgroup_blkio);
+module_exit(exit_cgroup_blkio);
+MODULE_LICENSE("GPL");
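
With the module init/exit hooks above, the subsystem registers itself via cgroup_load_subsys() at load time. A hypothetical userspace check that it registered after `modprobe blk-cgroup` (the mount point is an example path, not taken from the patch):

	/* Hypothetical: bind a cgroup hierarchy mount to the "blkio" subsystem. */
	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		if (mount("none", "/mnt/blkio", "cgroup", 0, "blkio"))
			perror("mount blkio");
		return 0;
	}
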
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 84bf745..8ccc204 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,7 +15,13 @@
#include <linux/cgroup.h>
-#ifdef CONFIG_BLK_CGROUP
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+
+#ifndef CONFIG_BLK_CGROUP
+/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
+extern struct cgroup_subsys blkio_subsys;
+#define blkio_subsys_id blkio_subsys.subsys_id
+#endif
struct blkio_cgroup {
struct cgroup_subsys_state css;
@@ -91,7 +97,7 @@ static inline void blkiocg_update_blkio_group_dequeue_stats(
struct blkio_group *blkg, unsigned long dequeue) {}
#endif
-#ifdef CONFIG_BLK_CGROUP
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
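
The macro introduced above exists because a modular subsystem's id is not a compile-time constant. A sketch of the two configurations (assumed semantics, summarizing the header change):

	/* CONFIG_BLK_CGROUP=y: blkio_subsys_id is the enum value generated
	 *                      from include/linux/cgroup_subsys.h.
	 * CONFIG_BLK_CGROUP=m: the macro reads blkio_subsys.subsys_id,
	 *                      filled in by cgroup_load_subsys() at load time.
	 */
	int id = blkio_subsys_id;	/* callers compile unchanged either way */
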
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 96e83c2..edce1ef 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -24,6 +24,7 @@
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include "blk.h"
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 3f65c8a..d22c4c5 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/slab.h>
#include "blk.h"
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 31e7a93..f5ed5a1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -8,7 +8,9 @@
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
+#include <linux/lcm.h>
#include <linux/jiffies.h>
+#include <linux/gfp.h>
#include "blk.h"
@@ -461,16 +463,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
}
EXPORT_SYMBOL(blk_queue_stack_limits);
-static unsigned int lcm(unsigned int a, unsigned int b)
-{
- if (a && b)
- return (a * b) / gcd(a, b);
- else if (b)
- return b;
-
- return a;
-}
-
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
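
The open-coded lcm() removed above is replaced by the shared helper behind the new <linux/lcm.h> include. A sketch of the equivalent computation, renamed here to avoid clashing with the real symbol; zero means "no limit", so the other operand wins:

	#include <linux/gcd.h>

	static unsigned long lcm_sketch(unsigned long a, unsigned long b)
	{
		if (a && b)
			return (a / gcd(a, b)) * b;	/* divide first to limit overflow */
		return a ? a : b;
	}
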
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 2ae2cb3..306759b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -2,6 +2,7 @@
* Functions related to sysfs handling
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -106,6 +107,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_sectors_kb, (page));
}
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_max_segments(q), (page));
+}
+
+static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+{
+ if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+ return queue_var_show(queue_max_segment_size(q), (page));
+
+ return queue_var_show(PAGE_CACHE_SIZE, (page));
+}
+
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_logical_block_size(q), page);
@@ -280,6 +294,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
.show = queue_max_hw_sectors_show,
};
+static struct queue_sysfs_entry queue_max_segments_entry = {
+ .attr = {.name = "max_segments", .mode = S_IRUGO },
+ .show = queue_max_segments_show,
+};
+
+static struct queue_sysfs_entry queue_max_segment_size_entry = {
+ .attr = {.name = "max_segment_size", .mode = S_IRUGO },
+ .show = queue_max_segment_size_show,
+};
+
static struct queue_sysfs_entry queue_iosched_entry = {
.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
.show = elv_iosched_show,
@@ -355,6 +379,8 @@ static struct attribute *default_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
+ &queue_max_segments_entry.attr,
+ &queue_max_segment_size_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 6b0f52c..ece65fc 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/slab.h>
#include "blk.h"
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ba7e0a..4f0c06c 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
struct request_queue *q = (struct request_queue *) data;
unsigned long flags, next = 0;
struct request *rq, *tmp;
+ int next_set = 0;
spin_lock_irqsave(q->queue_lock, flags);
@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
- } else if (!next || time_after(next, rq->deadline))
+ } else if (!next_set || time_after(next, rq->deadline)) {
next = rq->deadline;
+ next_set = 1;
+ }
}
- /*
- * next can never be 0 here with the list non-empty, since we always
- * bump ->deadline to 1 so we can detect if the timer was ever added
- * or not. See comment in blk_add_timer()
- */
- if (next)
+ if (next_set)
mod_timer(&q->timeout, round_jiffies_up(next));
spin_unlock_irqrestore(q->queue_lock, flags);
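
The fix above replaces the "next == 0 means unset" convention with an explicit flag: 0 is a legitimate jiffies value, and the old time_after() test compared against an uninitialized minimum on the first match. The generic pattern, as a self-contained sketch:

	#include <linux/jiffies.h>

	static unsigned long earliest_deadline(const unsigned long *dl, int n,
					       int *next_set)
	{
		unsigned long next = 0;
		int i;

		*next_set = 0;
		for (i = 0; i < n; i++) {
			if (!*next_set || time_after(next, dl[i])) {
				next = dl[i];	/* keep the earliest deadline */
				*next_set = 1;
			}
		}
		return next;
	}
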
diff --git a/block/bsg.c b/block/bsg.c
index a9fd2d8..82d5882 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -21,6 +21,7 @@
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>
+#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
@@ -260,7 +261,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
return ERR_PTR(ret);
/*
- * map scatter-gather elements seperately and string them to request
+ * map scatter-gather elements separately and string them to request
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index dee9d93..838834b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -7,6 +7,7 @@
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*/
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
@@ -47,6 +48,7 @@ static const int cfq_hist_divisor = 4;
#define CFQ_SERVICE_SHIFT 12
#define CFQQ_SEEK_THR (sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
@@ -947,6 +949,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
unsigned int major, minor;
cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
+ sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+ cfqg->blkg.dev = MKDEV(major, minor);
+ goto done;
+ }
if (cfqg || !create)
goto done;
@@ -1517,7 +1524,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "set_active");
+ cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
+ cfqd->serving_prio, cfqd->serving_type);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -1660,9 +1668,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
}
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq, bool for_preempt)
+ struct request *rq)
{
- return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
+ return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -1689,7 +1697,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
* will contain the closest sector.
*/
__cfqq = rb_entry(parent, struct cfq_queue, p_node);
- if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+ if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
return __cfqq;
if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1700,7 +1708,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
return NULL;
__cfqq = rb_entry(node, struct cfq_queue, p_node);
- if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+ if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
return __cfqq;
return NULL;
@@ -1721,6 +1729,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
{
struct cfq_queue *cfqq;
+ if (cfq_class_idle(cur_cfqq))
+ return NULL;
if (!cfq_cfqq_sync(cur_cfqq))
return NULL;
if (CFQQ_SEEKY(cur_cfqq))
@@ -1787,7 +1797,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* Otherwise, we do only if they are the last ones
* in their service tree.
*/
- return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
+ if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
+ return 1;
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+ service_tree->count);
+ return 0;
}
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -1832,8 +1846,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* time slice.
*/
if (sample_valid(cic->ttime_samples) &&
- (cfqq->slice_end - jiffies < cic->ttime_mean))
+ (cfqq->slice_end - jiffies < cic->ttime_mean)) {
+ cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+ cic->ttime_mean);
return;
+ }
cfq_mark_cfqq_wait_request(cfqq);
@@ -2041,6 +2058,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
slice = max(slice, 2 * cfqd->cfq_slice_idle);
slice = max_t(unsigned, slice, CFQ_MIN_TT);
+ cfq_log(cfqd, "workload slice:%d", slice);
cfqd->workload_expires = jiffies + slice;
cfqd->noidle_tree_requires_idle = false;
}
@@ -2188,10 +2206,13 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
struct cfq_queue *cfqq;
int dispatched = 0;
- while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
+ /* Expire the timeslice of the current active queue first */
+ cfq_slice_expired(cfqd, 0);
+ while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
+ __cfq_set_active_queue(cfqd, cfqq);
dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+ }
- cfq_slice_expired(cfqd, 0);
BUG_ON(cfqd->busy_queues);
cfq_log(cfqd, "forced_dispatch=%d", dispatched);
@@ -3103,7 +3124,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
- if (cfq_rq_close(cfqd, cfqq, rq, true))
+ if (cfq_rq_close(cfqd, cfqq, rq))
return true;
return false;
@@ -3307,6 +3328,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (cfq_should_wait_busy(cfqd, cfqq)) {
cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
cfq_mark_cfqq_wait_busy(cfqq);
+ cfq_log_cfqq(cfqd, cfqq, "will busy wait");
}
/*
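
Among the changes above, cfq_rq_close() now uses the single, larger CFQQ_CLOSE_THR bound and drops the for_preempt parameter. A sketch of the assumed semantics (not patch code): a request is "close" if it lies within the threshold of the last dispatched position, in either direction:

	#include <linux/types.h>

	#define CLOSE_THR_SKETCH	((sector_t)(8 * 1024))

	static int rq_close_sketch(sector_t last_pos, sector_t rq_pos)
	{
		sector_t dist = rq_pos >= last_pos ? rq_pos - last_pos
						   : last_pos - rq_pos;
		return dist <= CLOSE_THR_SKETCH;
	}
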
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 4eb8e9e..f26051f 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -6,6 +6,7 @@
#include <linux/elevator.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
+#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
diff --git a/block/elevator.c b/block/elevator.c
index df75676..76e3702 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name)
spin_unlock(&elv_list_lock);
- sprintf(elv, "%s-iosched", name);
+ snprintf(elv, sizeof(elv), "%s-iosched", name);
request_module("%s", elv);
spin_lock(&elv_list_lock);
diff --git a/block/ioctl.c b/block/ioctl.c
index be48ea5..8905d2a 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,5 +1,6 @@
#include <linux/capability.h>
#include <linux/blkdev.h>
+#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 3a0d369..232c4b3 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -5,6 +5,7 @@
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/init.h>
struct noop_data {