author    Chris Mason <chris.mason@fusionio.com>  2013-03-26 17:07:00 (GMT)
committer Chris Mason <chris.mason@fusionio.com>  2013-03-26 17:19:14 (GMT)
commit    4adaa611020fa6ac65b0ac8db78276af4ec04e63 (patch)
tree      b478ee3f9a16714521f5ced528ff2ce7afd71b8e /fs/btrfs/extent_io.h
parent    1dd05682b3ef6e70409e130bfd83e91770801589 (diff)
download  linux-4adaa611020fa6ac65b0ac8db78276af4ec04e63.tar.xz
Btrfs: fix race between mmap writes and compression
Btrfs uses page_mkwrite to ensure stable pages during crc calculations and mmap workloads. We call clear_page_dirty_for_io before we do any crcs, and this forces any application with the file mapped to wait for the crc to finish before it is allowed to change the file.

With compression on, the clear_page_dirty_for_io step happens after we've compressed the pages. This means the applications might be changing the pages while we are compressing them, and some of those modifications might not hit the disk.

This commit adds the clear_page_dirty_for_io before compression starts and makes sure to redirty the pages if we have to fall back to uncompressed IO.

Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Reported-by: Alexandre Oliva <oliva@gnu.org>
cc: stable@vger.kernel.org
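The point of the fix is the ordering in the async compression path: the dirty bits have to be cleared before the pages are handed to the compressor, and restored if compression is abandoned. Below is a minimal sketch of that ordering; the calling function and its locals are illustrative only (the real caller lives in fs/btrfs/inode.c), and only the two extent_range_*_for_io() helpers declared in the diff below come from this patch.

/*
 * Illustrative outline of the ordering this patch enforces; not the
 * actual Btrfs compression code.
 */
static void compress_range_sketch(struct inode *inode, u64 start, u64 end)
{
	int compress_failed;

	/*
	 * Clear the dirty bits *before* compressing.  page_mkwrite now
	 * blocks any mmap writer until writeback finishes, so the bytes
	 * being compressed cannot change underneath us.
	 */
	extent_range_clear_dirty_for_io(inode, start, end);

	compress_failed = 1; /* e.g. try_to_compress(inode, start, end) */

	if (compress_failed) {
		/*
		 * Falling back to uncompressed IO: nothing has been
		 * written yet, so the pages must be marked dirty again
		 * or their contents would never reach disk.
		 */
		extent_range_redirty_for_io(inode, start, end);
	}
}

Previously the dirty bits were only cleared after compression had already read the pages, which is exactly the window the commit message describes.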
Diffstat (limited to 'fs/btrfs/extent_io.h')
-rw-r--r--  fs/btrfs/extent_io.h  2
1 files changed, 2 insertions, 0 deletions
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6068a19..258c921 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 		      unsigned long *map_len);
 int extent_range_uptodate(struct extent_io_tree *tree,
 			  u64 start, u64 end);
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
 				struct extent_io_tree *tree,
 				u64 start, u64 end, struct page *locked_page,
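The hunk above only adds the declarations; the bodies would live in fs/btrfs/extent_io.c, which is not part of this diffstat. One plausible implementation, assuming the 3.9-era page cache API (find_get_page(), clear_page_dirty_for_io(), account_page_redirty(), __set_page_dirty_nobuffers(), page_cache_release(), PAGE_CACHE_SHIFT), could look like the sketch below; it is not taken from the patch itself.

/*
 * Sketch only: walk the page range and clear the dirty bits so
 * page_mkwrite serializes mmap writers against the pending IO.
 */
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* caller is expected to have the pages pinned */
		clear_page_dirty_for_io(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}

/*
 * Sketch only: re-dirty the same range when we fall back to
 * uncompressed IO, so the data is written out later.
 */
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* caller is expected to have the pages pinned */
		account_page_redirty(page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}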