From 8144f1f69943f447fd1bcb2d26ca011002d5df63 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 11 Aug 2014 13:36:54 -0400
Subject: locks: show delegations as "DELEG" in /proc/locks

Now that they are a distinct lease type, show them as such.

Cc: J. Bruce Fields
Signed-off-by: Jeff Layton

diff --git a/fs/locks.c b/fs/locks.c
index a6f5480..356667a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2452,7 +2452,11 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
 			seq_puts(f, "FLOCK ADVISORY ");
 		}
 	} else if (IS_LEASE(fl)) {
-		seq_puts(f, "LEASE ");
+		if (fl->fl_flags & FL_DELEG)
+			seq_puts(f, "DELEG ");
+		else
+			seq_puts(f, "LEASE ");
+
 		if (lease_breaking(fl))
 			seq_puts(f, "BREAKING ");
 		else if (fl->fl_file)
--
cgit v0.10.2

From 566709bd627caf933ab8edffaf598203a0c5c8b2 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 11 Aug 2014 14:09:35 -0400
Subject: locks: don't call locks_release_private from locks_copy_lock

All callers of locks_copy_lock pass in a brand new file_lock struct, so
there's no need to call locks_release_private on it. Replace that with
a warning that fires in the event that we receive a target lock that
doesn't look like it's properly initialized.

Acked-by: J. Bruce Fields
Signed-off-by: Jeff Layton

diff --git a/fs/locks.c b/fs/locks.c
index 356667a..2c2d4f5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -285,7 +285,8 @@ EXPORT_SYMBOL(__locks_copy_lock);

 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
-	locks_release_private(new);
+	/* "new" must be a freshly-initialized lock */
+	WARN_ON_ONCE(new->fl_ops);

 	__locks_copy_lock(new, fl);
 	new->fl_file = fl->fl_file;
--
cgit v0.10.2

From b84d49f9440b2b039828f3eb114e4bd4ebeb0c54 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Tue, 12 Aug 2014 08:03:49 -0400
Subject: locks: don't reuse file_lock in __posix_lock_file

Currently in the case where a new file lock completely replaces the old
one, we end up overwriting the existing lock with the new info. This
means that we have to call fl_release_private inside i_lock.

Change the code to instead copy the info to new_fl, insert that lock
into the correct spot and then delete the old lock. In a later patch,
we'll defer the freeing of the old lock until after the i_lock has been
dropped.

Acked-by: J. Bruce Fields
Signed-off-by: Jeff Layton

diff --git a/fs/locks.c b/fs/locks.c
index 2c2d4f5..7dd4def 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1022,18 +1022,21 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 				locks_delete_lock(before);
 				continue;
 			}
-			/* Replace the old lock with the new one.
-			 * Wake up anybody waiting for the old one,
-			 * as the change in lock type might satisfy
-			 * their needs.
+			/*
+			 * Replace the old lock with new_fl, and
+			 * remove the old one. It's safe to do the
+			 * insert here since we know that we won't be
+			 * using new_fl later, and that the lock is
+			 * just replacing an existing lock.
 			 */
-			locks_wake_up_blocks(fl);
-			fl->fl_start = request->fl_start;
-			fl->fl_end = request->fl_end;
-			fl->fl_type = request->fl_type;
-			locks_release_private(fl);
-			locks_copy_private(fl, request);
-			request = fl;
+			error = -ENOLCK;
+			if (!new_fl)
+				goto out;
+			locks_copy_lock(new_fl, request);
+			request = new_fl;
+			new_fl = NULL;
+			locks_delete_lock(before);
+			locks_insert_lock(before, request);
 			added = true;
 		}
 	}
--
cgit v0.10.2
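An aside on the user-visible side of the first patch above: every lock, lease,
and delegation shows up as one line in /proc/locks, and the type column is what
now reads DELEG instead of LEASE for delegations. The program below is only an
illustrative sketch for poking at that file, not part of the series; the temp
file path and the whole-file write lock are arbitrary choices.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char path[] = "/tmp/locks-demo-XXXXXX";
	int fd = mkstemp(path);
	struct flock fl = {
		.l_type   = F_WRLCK,	/* whole-file write lock (l_start/l_len = 0) */
		.l_whence = SEEK_SET,
	};

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("lock");
		return 1;
	}

	/* Dump /proc/locks; our lock shows up in a "POSIX ADVISORY WRITE" line,
	 * while leases and delegations show up as LEASE or DELEG entries. */
	FILE *p = fopen("/proc/locks", "r");
	if (p) {
		int c;
		while ((c = fgetc(p)) != EOF)
			putchar(c);
		fclose(p);
	}

	unlink(path);
	close(fd);	/* the record lock is dropped with the descriptor */
	return 0;
}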
From ed9814d85810c27670987b40c77e8a07105838fe Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 11 Aug 2014 14:20:31 -0400
Subject: locks: defer freeing locks in locks_delete_lock until after i_lock has been dropped

In commit 72f98e72551fa (locks: turn lock_flocks into a spinlock), we
moved from using the BKL to a global spinlock. With this change, we
lost the ability to block in the fl_release_private operation. This is
problematic for NFS (and probably some other filesystems as well).

Add a new list_head argument to locks_delete_lock. If that argument is
non-NULL, then queue any locks that we want to free to the list instead
of freeing them. Then, add a new locks_dispose_list function that will
walk such a list and call locks_free_lock on them after the i_lock has
been dropped.

Finally, change all of the callers of locks_delete_lock to pass in a
list_head, except for lease_modify. That function can be called long
after the i_lock has been acquired. Deferring the freeing of a lease
after unlocking it in that function is non-trivial until we overhaul
some of the spinlocking in the lease code.

Currently though, no filesystem that sets fl_release_private supports
leases, so this is not currently a problem. We'll eventually want to
make the same change in the lease code, but it needs a lot more work
before we can reasonably do so.

Acked-by: J. Bruce Fields
Signed-off-by: Jeff Layton

diff --git a/fs/locks.c b/fs/locks.c
index 7dd4def..4ce087c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -247,6 +247,18 @@ void locks_free_lock(struct file_lock *fl)
 }
 EXPORT_SYMBOL(locks_free_lock);

+static void
+locks_dispose_list(struct list_head *dispose)
+{
+	struct file_lock *fl;
+
+	while (!list_empty(dispose)) {
+		fl = list_first_entry(dispose, struct file_lock, fl_block);
+		list_del_init(&fl->fl_block);
+		locks_free_lock(fl);
+	}
+}
+
 void locks_init_lock(struct file_lock *fl)
 {
 	memset(fl, 0, sizeof(struct file_lock));
@@ -651,12 +663,16 @@ static void locks_unlink_lock(struct file_lock **thisfl_p)
  *
  * Must be called with i_lock held!
  */
-static void locks_delete_lock(struct file_lock **thisfl_p)
+static void locks_delete_lock(struct file_lock **thisfl_p,
+			      struct list_head *dispose)
 {
 	struct file_lock *fl = *thisfl_p;

 	locks_unlink_lock(thisfl_p);
-	locks_free_lock(fl);
+	if (dispose)
+		list_add(&fl->fl_block, dispose);
+	else
+		locks_free_lock(fl);
 }

 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
@@ -812,6 +828,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	struct inode * inode = file_inode(filp);
 	int error = 0;
 	int found = 0;
+	LIST_HEAD(dispose);

 	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
 		new_fl = locks_alloc_lock();
@@ -834,7 +851,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 		if (request->fl_type == fl->fl_type)
 			goto out;
 		found = 1;
-		locks_delete_lock(before);
+		locks_delete_lock(before, &dispose);
 		break;
 	}

@@ -881,6 +898,7 @@ out:
 	spin_unlock(&inode->i_lock);
 	if (new_fl)
 		locks_free_lock(new_fl);
+	locks_dispose_list(&dispose);
 	return error;
 }

@@ -894,6 +912,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 	struct file_lock **before;
 	int error;
 	bool added = false;
+	LIST_HEAD(dispose);

 	/*
 	 * We may need two file_lock structures for this operation,
@@ -989,7 +1008,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			else
 				request->fl_end = fl->fl_end;
 			if (added) {
-				locks_delete_lock(before);
+				locks_delete_lock(before, &dispose);
 				continue;
 			}
 			request = fl;
@@ -1019,7 +1038,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			 * one (This may happen several times).
 			 */
 			if (added) {
-				locks_delete_lock(before);
+				locks_delete_lock(before, &dispose);
 				continue;
 			}
 			/*
@@ -1035,7 +1054,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 			locks_copy_lock(new_fl, request);
 			request = new_fl;
 			new_fl = NULL;
-			locks_delete_lock(before);
+			locks_delete_lock(before, &dispose);
 			locks_insert_lock(before, request);
 			added = true;
 		}
@@ -1097,6 +1116,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 		locks_free_lock(new_fl);
 	if (new_fl2)
 		locks_free_lock(new_fl2);
+	locks_dispose_list(&dispose);
 	return error;
 }

@@ -1272,7 +1292,7 @@ int lease_modify(struct file_lock **before, int arg)
 			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
 			fl->fl_fasync = NULL;
 		}
-		locks_delete_lock(before);
+		locks_delete_lock(before, NULL);
 	}
 	return 0;
 }
@@ -2324,6 +2344,7 @@ void locks_remove_file(struct file *filp)
 	struct inode * inode = file_inode(filp);
 	struct file_lock *fl;
 	struct file_lock **before;
+	LIST_HEAD(dispose);

 	if (!inode->i_flock)
 		return;
@@ -2369,12 +2390,13 @@ void locks_remove_file(struct file *filp)
 				fl->fl_type, fl->fl_flags,
 				fl->fl_start, fl->fl_end);

-			locks_delete_lock(before);
+			locks_delete_lock(before, &dispose);
 			continue;
 		}
 		before = &fl->fl_next;
 	}
 	spin_unlock(&inode->i_lock);
+	locks_dispose_list(&dispose);
 }

 /**
--
cgit v0.10.2
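The core idea of the patch above - unhook entries while the spinlock is held,
but defer the actual (possibly blocking) free until after the lock is dropped -
is easy to mirror in ordinary userspace code. Below is a rough analogue using a
pthread mutex and a local dispose list; it is only a sketch, and the node type
and function names are invented for illustration (they do not come from the
kernel). Build it with something like "cc -pthread dispose.c".

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical node type; free() stands in for fl_release_private. */
struct node {
	int		key;
	struct node	*next;
};

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Remove every node with a matching key. Nothing is freed while the mutex
 * is held; victims are strung onto a private "dispose" list and destroyed
 * only after pthread_mutex_unlock(), mirroring locks_dispose_list(). */
static void remove_key(int key)
{
	struct node **pp, *victim, *dispose = NULL;

	pthread_mutex_lock(&lock);
	for (pp = &head; (victim = *pp) != NULL; ) {
		if (victim->key == key) {
			*pp = victim->next;	/* unlink from the shared list */
			victim->next = dispose;	/* queue for later freeing */
			dispose = victim;
		} else {
			pp = &victim->next;
		}
	}
	pthread_mutex_unlock(&lock);

	/* Safe to do slow or blocking teardown here, outside the lock. */
	while (dispose) {
		struct node *next = dispose->next;
		free(dispose);
		dispose = next;
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		n->key = i % 2;
		n->next = head;
		head = n;
	}
	remove_key(0);
	for (struct node *n = head; n; n = n->next)
		printf("remaining key %d\n", n->key);
	return 0;
}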
From 2dfb928f7e5977a3faac943c134bbda5ae492629 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 11 Aug 2014 18:14:12 -0400
Subject: locks: move locks_free_lock calls in do_fcntl_add_lease outside spinlock

There's no need to call locks_free_lock here while still holding the
i_lock. Defer that until the lock has been dropped.

Acked-by: J. Bruce Fields
Signed-off-by: Jeff Layton

diff --git a/fs/locks.c b/fs/locks.c
index 4ce087c..cb66fb0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1761,13 +1761,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
 	ret = fl;
 	spin_lock(&inode->i_lock);
 	error = __vfs_setlease(filp, arg, &ret);
-	if (error) {
-		spin_unlock(&inode->i_lock);
-		locks_free_lock(fl);
-		goto out_free_fasync;
-	}
-	if (ret != fl)
-		locks_free_lock(fl);
+	if (error)
+		goto out_unlock;
+	if (ret == fl)
+		fl = NULL;

 	/*
 	 * fasync_insert_entry() returns the old entry if any.
@@ -1779,9 +1776,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
 		new = NULL;

 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+out_unlock:
 	spin_unlock(&inode->i_lock);
-
-out_free_fasync:
+	if (fl)
+		locks_free_lock(fl);
 	if (new)
 		fasync_free(new);
 	return error;
--
cgit v0.10.2
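do_fcntl_add_lease() above is the kernel half of the fcntl(F_SETLEASE)
interface. For context, here is a minimal userspace sketch of that interface;
it is not part of the series, error handling is trimmed, the file name is an
arbitrary choice, and taking a lease requires owning the file (or CAP_LEASE)
and a read-only descriptor for a read lease.

#define _GNU_SOURCE		/* F_SETLEASE, F_GETLEASE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "leased-file";
	int fd = open(path, O_RDONLY | O_CREAT, 0644);

	if (fd < 0 || fcntl(fd, F_SETLEASE, F_RDLCK) < 0) {
		perror("F_SETLEASE");
		return 1;
	}

	/* While held, the lease appears in /proc/locks as a LEASE entry;
	 * delegations handed out by knfsd appear as DELEG after the first
	 * patch in this series. */
	printf("holding a read lease on %s (F_GETLEASE = %d)\n",
	       path, fcntl(fd, F_GETLEASE));

	fcntl(fd, F_SETLEASE, F_UNLCK);	/* give the lease back */
	close(fd);
	return 0;
}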
From 2ece173e4715031c031de9114491eee80a69cf68 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Tue, 12 Aug 2014 10:38:07 -0400
Subject: locks: update Locking documentation to clarify fl_release_private behavior

Acked-by: J. Bruce Fields
Signed-off-by: Jeff Layton

diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index b18dd17..f1997e9 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -349,7 +349,11 @@ prototypes:
 locking rules:
 			inode->i_lock	may block
 fl_copy_lock:		yes		no
-fl_release_private:	maybe		no
+fl_release_private:	maybe		maybe[1]
+
+[1]:	->fl_release_private for flock or POSIX locks is currently allowed
+	to block. Leases however can still be freed while the i_lock is held and
+	so fl_release_private called on a lease should not block.

 ----------------------- lock_manager_operations ---------------------------
 prototypes:
--
cgit v0.10.2