author		Linus Torvalds <torvalds@linux-foundation.org>	2014-11-23 19:16:36 (GMT)
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-11-23 19:16:36 (GMT)
commit		d038a63ace6cf2ce3aeafa741b73d542ffb65163 (patch)
tree		9d7b2bcc203fb3074a1f768fa30b071042690013
parent		cb95413971d605b0d152d3ceecc47ba8991d66fb (diff)
parent		f82c458a2c3ffb94b431fc6ad791a79df1b3713e (diff)
download	linux-d038a63ace6cf2ce3aeafa741b73d542ffb65163.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs deadlock fix from Chris Mason:
 "This has a fix for a long standing deadlock that we've been trying to
  nail down for a while. It ended up being a bad interaction with the
  fair reader/writer locks and the order btrfs reacquires locks in the
  btree"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: fix lockups from btrfs_clear_path_blocking
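The class of lockup described above can be sketched outside the kernel. With a fair (writer-preferring) reader/writer lock, a writer that is merely queued blocks every new read attempt, including a try-lock by a thread that already holds a read lock on the same structure; that writer in turn cannot run until the outer read lock is dropped, so spinning on the failed try-lock never makes progress. The program below is only a userspace analogue, not btrfs code: it assumes glibc's PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock kind as a stand-in for the kernel's fair rwlocks, and the writer() helper, the sleep-based timing and the file name are purely illustrative.

/*
 * Userspace analogue of the lockup (not btrfs code): with a fair,
 * writer-preferring rwlock, a queued writer makes a nested try-read
 * fail for as long as it waits -- and it cannot stop waiting until
 * the outer read lock is dropped.
 *
 * Build with: cc demo.c -o demo -lpthread   (glibc-specific rwlock kind)
 */
#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lock;

static void *writer(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&lock);	/* queues up behind the held read lock */
	pthread_rwlock_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_rwlockattr_t attr;
	pthread_t t;

	pthread_rwlockattr_init(&attr);
	/* writer-preferring ("fair"-style) behaviour */
	pthread_rwlockattr_setkind_np(&attr,
			PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
	pthread_rwlock_init(&lock, &attr);

	pthread_rwlock_rdlock(&lock);		/* outer lock, like a locked btree node */
	pthread_create(&t, NULL, writer, NULL);
	sleep(1);				/* give the writer time to queue up */

	/*
	 * A nested try-read now fails while the writer is queued; spinning
	 * on it would never make progress, which is the shape of the lockup
	 * the patch below is aimed at.
	 */
	if (pthread_rwlock_tryrdlock(&lock) == EBUSY)
		printf("nested tryrdlock fails while a writer is queued\n");
	else
		pthread_rwlock_unlock(&lock);

	pthread_rwlock_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}

In the patch itself, the new btrfs_tree_read_lock_atomic() refuses the lock only when a blocking writer is registered at the btrfs level and otherwise takes read_lock() outright (rechecking afterwards), so a writer that is merely queued on the underlying rwlock can delay the reader briefly but can no longer push it onto the fallback path; btrfs_try_tree_write_lock() is changed the same way, replacing write_trylock() with an unconditional write_lock() after the blocking checks.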
-rw-r--r--	fs/btrfs/ctree.c	14
-rw-r--r--	fs/btrfs/locking.c	24
-rw-r--r--	fs/btrfs/locking.h	2
3 files changed, 25 insertions, 15 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 19bc616..150822e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
 	int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/* lockdep really cares that we take all of these spinlocks
-	 * in the right order. If any of the locks in the path are not
-	 * currently blocking, it is going to complain. So, make really
-	 * really sure by forcing the path to blocking before we clear
-	 * the path blocking.
-	 */
 	if (held) {
 		btrfs_set_lock_blocking_rw(held, held_rw);
 		if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 			held_rw = BTRFS_READ_LOCK_BLOCKING;
 	}
 	btrfs_set_path_blocking(p);
-#endif
 
 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
 		if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 		}
 	}
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (held)
 		btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
 				}
 				p->locks[level] = BTRFS_WRITE_LOCK;
 			} else {
-				err = btrfs_try_tree_read_lock(b);
+				err = btrfs_tree_read_lock_atomic(b);
 				if (!err) {
 					btrfs_set_path_blocking(p);
 					btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
 			}
 
 			level = btrfs_header_level(b);
-			err = btrfs_try_tree_read_lock(b);
+			err = btrfs_tree_read_lock_atomic(b);
 			if (!err) {
 				btrfs_set_path_blocking(p);
 				btrfs_tree_read_lock(b);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5665d21..f8229ef 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -128,6 +128,26 @@ again:
 }
 
 /*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+	if (atomic_read(&eb->blocking_writers))
+		return 0;
+
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		return 0;
+	}
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
+	return 1;
+}
+
+/*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
  */
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers))
 		return 0;
-	if (!write_trylock(&eb->lock))
-		return 0;
-
+	write_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index b81e0e9..c44a9d5 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {