author		Jens Axboe <jens.axboe@oracle.com>	2008-12-09 07:11:22 (GMT)
committer	Jens Axboe <jens.axboe@oracle.com>	2008-12-29 07:29:50 (GMT)
commit		abf137dd7712132ee56d5b3143c2ff61a72a5faa (patch)
tree		8334f03c598343bb93340f081fcde5ba659b440b /kernel
parent		392ddc32982a5c661dd90dd49a3cb37f1c68b782 (diff)
download	linux-abf137dd7712132ee56d5b3143c2ff61a72a5faa.tar.xz
aio: make the lookup_ioctx() lockless
The mm->ioctx_list is currently protected by a reader-writer lock, so we always grab that lock on the read side for doing ioctx lookups. As the workload is extremely reader biased, turn this into an rcu hlist so we can make lookup_ioctx() lockless. Get rid of the rwlock and use a spinlock for providing update side exclusion.

There's usually only 1 entry on this list, so it doesn't make sense to look into fancier data structures.

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
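The read side of this change lands in fs/aio.c, which this kernel/-limited view does not show. Below is a minimal sketch of what a lockless lookup_ioctx() looks like under this scheme, assuming the kioctx fields and helpers of the era (user_id, dead, get_ioctx()) and the four-argument hlist_for_each_entry_rcu() of 2008-vintage kernels:

#include <linux/rcupdate.h>
#include <linux/rculist.h>

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct hlist_node *n;

	rcu_read_lock();
	/* No mm->ioctx_lock on the read side: RCU keeps list nodes valid */
	hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id && !ctx->dead) {
			get_ioctx(ctx);	/* pin the ctx before leaving the RCU section */
			ret = ctx;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}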
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 6144b36..43cbf30 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -415,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	set_mm_counter(mm, file_rss, 0);
 	set_mm_counter(mm, anon_rss, 0);
 	spin_lock_init(&mm->page_table_lock);
-	rwlock_init(&mm->ioctx_list_lock);
-	mm->ioctx_list = NULL;
+	spin_lock_init(&mm->ioctx_lock);
+	INIT_HLIST_HEAD(&mm->ioctx_list);
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->cached_hole_size = ~0UL;
 	mm_init_owner(mm, p);
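The update side is the mirror image of the lookup: writers serialize on the new mm->ioctx_lock and use the RCU list primitives, so concurrent lockless readers always see a consistent list, and an unlinked entry must wait out a grace period before it can be freed. A hedged sketch with illustrative helper names (ioctx_add() and ioctx_remove() are not from the patch):

#include <linux/rculist.h>
#include <linux/spinlock.h>

/* Writers exclude each other with the spinlock; readers rely on RCU. */
static void ioctx_add(struct mm_struct *mm, struct kioctx *ctx)
{
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);
}

static void ioctx_remove(struct mm_struct *mm, struct kioctx *ctx)
{
	spin_lock(&mm->ioctx_lock);
	hlist_del_rcu(&ctx->list);
	spin_unlock(&mm->ioctx_lock);

	/* Wait for in-flight lockless lookups before ctx can be freed */
	synchronize_rcu();
}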