Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	22
1 file changed, 17 insertions, 5 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 5d55a89..6e35762 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping;
+	struct address_space *mapping, *prev_mapping = NULL;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE_SYNC, 1, &bh);
+				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+				/*
+				 * Kick off IO for the previous mapping. Note
+				 * that we will not run the very last mapping,
+				 * wait_on_buffer() will do that for us
+				 * through sync_buffer().
+				 */
+				if (prev_mapping && prev_mapping != mapping)
+					blk_run_address_space(prev_mapping);
+				prev_mapping = mapping;
+
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -2957,12 +2968,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC)
+		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+		    rw == SWRITE_SYNC_PLUG) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
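In ll_rw_block() itself, the SWRITE family blocks on the buffer lock via lock_buffer(), while plain READ/WRITE skips buffers it cannot trylock; this hunk simply adds SWRITE_SYNC_PLUG to both write-mode checks. A hedged usage sketch of picking a mode (write_one_buffer is a made-up helper, not in this patch):

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/* Made-up helper: choose among the write modes ll_rw_block() accepts. */
	static void write_one_buffer(struct buffer_head *bh, bool sync, bool batch)
	{
		int rw;

		if (!sync)
			rw = WRITE;		/* trylock path: skip busy buffers */
		else if (batch)
			rw = SWRITE_SYNC_PLUG;	/* wait for lock, keep queue plugged */
		else
			rw = SWRITE_SYNC;	/* wait for lock, unplug immediately */

		ll_rw_block(rw, 1, &bh);
	}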
@@ -2998,7 +3010,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
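Finally, sync_dirty_buffer() waits on the buffer immediately after submission, so the write is now flagged WRITE_SYNC and IO schedulers such as CFQ can treat it as synchronous IO rather than background writeback. A minimal sketch of that submit-and-wait pattern, assuming 2.6.30-era APIs (write_and_wait is hypothetical and error handling is abbreviated):

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/* Hypothetical sketch of the submit-and-wait pattern. */
	static int write_and_wait(struct buffer_head *bh)
	{
		int ret;

		lock_buffer(bh);
		if (!test_clear_buffer_dirty(bh)) {
			unlock_buffer(bh);
			return 0;
		}

		get_bh(bh);			/* dropped by end_buffer_write_sync() */
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE_SYNC, bh);	/* caller blocks right away */
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
		return ret;
	}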