path: root/drivers/md/bcache/bcache.h
author     Kent Overstreet <koverstreet@google.com>  2013-06-05 13:24:39 (GMT)
committer  Kent Overstreet <koverstreet@google.com>  2013-06-27 04:58:04 (GMT)
commit     72c270612bd33192fa836ad0f2939af1ca218292 (patch)
tree       344129d75f3b5c0abcf77dd4b6340783a126cde8 /drivers/md/bcache/bcache.h
parent     279afbad4e54acbd61bf88a54a73af3bbfdeb5dd (diff)
bcache: Write out full stripes
Now that we're tracking dirty data per stripe, we can add two optimizations for raid5/6:

* If a stripe is already dirty, force writes to that stripe to writeback mode - to help build up full stripes of dirty data

* When flushing dirty data, preferentially write out full stripes first if there are any.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
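The idea behind the two policies can be summarized with a small, self-contained sketch (plain C; the struct and helper names are illustrative only, not the code introduced by this commit): a stripe that already has dirty sectors attracts further writes into writeback mode, and a stripe whose dirty count equals its size is a full stripe and is flushed first.

#include <stdbool.h>

/* Hypothetical model of the two policies described above; the struct and
 * helper names are illustrative, not taken from bcache itself. */
struct stripe_state {
	unsigned sectors_dirty;		/* dirty sectors tracked per stripe */
	unsigned stripe_sectors;	/* stripe size in sectors */
};

/* Policy 1: a write aimed at an already-dirty stripe is forced into
 * writeback mode, so the stripe keeps accumulating dirty data instead of
 * being partially written through to the raid5/6 backing device. */
static bool force_writeback(const struct stripe_state *s)
{
	return s->sectors_dirty > 0;
}

/* Policy 2: when flushing dirty data, full stripes are preferred. */
static bool is_full_stripe(const struct stripe_state *s)
{
	return s->sectors_dirty == s->stripe_sectors;
}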
Diffstat (limited to 'drivers/md/bcache/bcache.h')
-rw-r--r--  drivers/md/bcache/bcache.h  3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index dbddef0..342ba86 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -387,8 +387,6 @@ struct keybuf_key {
 typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
 
 struct keybuf {
-	keybuf_pred_fn	*key_predicate;
-
 	struct bkey	last_scanned;
 	spinlock_t	lock;
@@ -532,6 +530,7 @@ struct cached_dev {
 	unsigned		sequential_merge:1;
 	unsigned		verify:1;
 
+	unsigned		partial_stripes_expensive:1;
 	unsigned		writeback_metadata:1;
 	unsigned		writeback_running:1;
 	unsigned char		writeback_percent;
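The partial_stripes_expensive bit added above is the opt-in switch for this behaviour on backing devices where partial-stripe writes are costly (raid5/6). A hedged sketch of how such a flag might gate the write-path decision; the struct and function below are illustrative, not bcache's actual API:

#include <stdbool.h>

/* Illustrative only: combine the opt-in flag with a per-stripe dirty
 * count before steering a write into writeback mode. */
struct cached_dev_model {
	unsigned partial_stripes_expensive:1;
};

static bool steer_to_writeback(const struct cached_dev_model *dc,
			       unsigned stripe_sectors_dirty)
{
	return dc->partial_stripes_expensive && stripe_sectors_dirty > 0;
}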