From b32714ba29358a688ef337d5297bf4bdc9f596dc Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Fri, 9 Jan 2009 07:04:15 -0800
Subject: partial revert of asynchronous inode delete

let the core of this one bake in -next as well,
but leave some of the infrastructure in place.

Signed-off-by: Arjan van de Ven

diff --git a/fs/inode.c b/fs/inode.c
index 0013ac1..913ab2d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1139,11 +1139,16 @@ EXPORT_SYMBOL(remove_inode_hash);
  * I_FREEING is set so that no-one will take a new reference to the inode while
  * it is being deleted.
  */
-static void generic_delete_inode_async(void *data, async_cookie_t cookie)
+void generic_delete_inode(struct inode *inode)
 {
-	struct inode *inode = data;
 	const struct super_operations *op = inode->i_sb->s_op;
 
+	list_del_init(&inode->i_list);
+	list_del_init(&inode->i_sb_list);
+	inode->i_state |= I_FREEING;
+	inodes_stat.nr_inodes--;
+	spin_unlock(&inode_lock);
+
 	security_inode_delete(inode);
 
 	if (op->delete_inode) {
@@ -1167,16 +1172,6 @@ static void generic_delete_inode_async(void *data, async_cookie_t cookie)
 	destroy_inode(inode);
 }
 
-void generic_delete_inode(struct inode *inode)
-{
-	list_del_init(&inode->i_list);
-	list_del_init(&inode->i_sb_list);
-	inode->i_state |= I_FREEING;
-	inodes_stat.nr_inodes--;
-	spin_unlock(&inode_lock);
-	async_schedule_special(generic_delete_inode_async, inode, &inode->i_sb->s_async_list);
-}
-
 EXPORT_SYMBOL(generic_delete_inode);
 
 static void generic_forget_inode(struct inode *inode)
--
cgit v0.10.2

From cdb80f630be5cbc23d82331f24dc4704f75b64f4 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Fri, 9 Jan 2009 13:23:45 -0800
Subject: async: make async a command line option for now

... and have it default off.
This does allow people to work with it for testing.

Signed-off-by: Arjan van de Ven

diff --git a/kernel/async.c b/kernel/async.c
index 64cc916..f286e9f 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -65,6 +65,8 @@ static LIST_HEAD(async_pending);
 static LIST_HEAD(async_running);
 static DEFINE_SPINLOCK(async_lock);
 
+static int async_enabled = 0;
+
 struct async_entry {
 	struct list_head list;
 	async_cookie_t cookie;
@@ -169,7 +171,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
 	 * If we're out of memory or if there's too much work
 	 * pending already, we execute synchronously.
 	 */
-	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
 		kfree(entry);
 		spin_lock_irqsave(&async_lock, flags);
 		newcookie = next_cookie++;
@@ -316,8 +318,18 @@ static int async_manager_thread(void *unused)
 
 static int __init async_init(void)
 {
-	kthread_run(async_manager_thread, NULL, "async/mgr");
+	if (async_enabled)
+		kthread_run(async_manager_thread, NULL, "async/mgr");
 	return 0;
 }
 
+static int __init setup_async(char *str)
+{
+	async_enabled = 1;
+	return 1;
+}
+
+__setup("fastboot", setup_async);
+
+
 core_initcall(async_init);
--
cgit v0.10.2
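
Note: the second patch makes __async_schedule() fall back to running the call synchronously whenever async_enabled is unset, the async_entry allocation fails, or too much work is already pending. The fragment below is only a minimal user-space sketch of that fall-back pattern, not kernel code; the names fake_schedule, work_fn, enabled, pending, say_hello and the standalone main() are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's async_enabled flag and work cap. */
static int enabled = 0;		/* like async_enabled: off by default */
static int pending = 0;		/* like entry_count */
#define MAX_WORK 32768

typedef void (*work_fn)(void *data);

/*
 * Sketch of the decision in __async_schedule(): hand the call to the async
 * machinery only when it is enabled and resources allow, otherwise run it
 * synchronously right here so the caller still gets the work done.
 */
static void fake_schedule(work_fn fn, void *data)
{
	void *entry = malloc(sizeof(int));	/* stands in for the async_entry allocation */

	if (!enabled || !entry || pending > MAX_WORK) {
		free(entry);			/* free(NULL) is a no-op */
		fn(data);			/* synchronous fall-back */
		return;
	}
	/*
	 * A real implementation would queue the entry and wake a manager
	 * thread; the sketch just runs the function to stay self-contained.
	 */
	pending++;
	fn(data);
	pending--;
	free(entry);
}

static void say_hello(void *data)
{
	printf("ran %s\n", (const char *)data);
}

int main(int argc, char **argv)
{
	enabled = (argc > 1);	/* crude analogue of passing "fastboot" on the command line */
	fake_schedule(say_hello, "some work");
	return 0;
}

In the kernel patch the synchronous branch still allocates a cookie (newcookie = next_cookie++ under async_lock) before invoking the function directly, so callers of async_schedule() see the same interface whether or not "fastboot" is passed.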