author     Yu Chao <chao2.yu@samsung.com>        2013-09-12 03:17:51 (GMT)
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com> 2013-09-24 08:45:48 (GMT)
commit     e76eebee70948060d80b5f9edd2d97062049df53 (patch)
tree       0e553ac012ec66bbd50cc1ab0f3165c307b1b0e0 /fs/f2fs/f2fs.h
parent     4a10c2ac2f368583138b774ca41fac4207911983 (diff)
download   linux-fsl-qoriq-e76eebee70948060d80b5f9edd2d97062049df53.tar.xz
f2fs: optimize fs_lock for better performance
There is a performance problem: when all sbi->fs_lock mutexes are held, every thread that then enters mutex_lock_op() may read the same next_lock value from sbi->next_lock_num and wait on the same lock (fs_lock[next_lock]), which degrades performance. So we move the sbi->next_lock_num++ increment to before taking the lock; this spreads the waiting threads evenly across the locks when all sbi->fs_lock mutexes are held.

v1-->v2: Drop the needless spin_lock as Jaegeuk suggested.

Suggested-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Signed-off-by: Yu Chao <chao2.yu@samsung.com>
Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
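To make the effect of the reordering concrete, here is a minimal user-space sketch of the same pattern, not kernel code: it substitutes pthread mutexes for the kernel's struct mutex, and NR_LOCKS, struct sb_info, lock_op_old() and lock_op_new() are hypothetical names introduced only for illustration.

/*
 * User-space sketch of the lock-selection change.  NR_LOCKS, struct
 * sb_info, lock_op_old() and lock_op_new() are illustrative stand-ins,
 * not kernel identifiers; NR_LOCKS is set to 8 here only for the demo.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_LOCKS 8

struct sb_info {
	pthread_mutex_t fs_lock[NR_LOCKS];
	unsigned char next_lock_num;
};

/*
 * Pre-patch behaviour: next_lock is sampled once, before blocking.
 * When all locks are held, every incoming thread reads the same
 * counter value and queues on the same mutex; the counter only
 * advances after the lock is finally acquired.
 */
static int lock_op_old(struct sb_info *sbi)
{
	unsigned char next_lock = sbi->next_lock_num % NR_LOCKS;
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		if (pthread_mutex_trylock(&sbi->fs_lock[i]) == 0)
			return i;

	pthread_mutex_lock(&sbi->fs_lock[next_lock]);
	sbi->next_lock_num++;
	return next_lock;
}

/*
 * Post-patch behaviour: the counter advances before blocking, so the
 * k-th thread to block sleeps on fs_lock[(n + k) % NR_LOCKS] and the
 * waiters fan out across all the locks.
 */
static int lock_op_new(struct sb_info *sbi)
{
	unsigned char next_lock;
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		if (pthread_mutex_trylock(&sbi->fs_lock[i]) == 0)
			return i;

	next_lock = sbi->next_lock_num++ % NR_LOCKS;
	pthread_mutex_lock(&sbi->fs_lock[next_lock]);
	return next_lock;
}

int main(void)
{
	struct sb_info sbi = { .next_lock_num = 3 };
	unsigned int n = sbi.next_lock_num;
	int i, k;

	for (i = 0; i < NR_LOCKS; i++)
		pthread_mutex_init(&sbi.fs_lock[i], NULL);

	/* Uncontended case: both variants succeed via trylock. */
	i = lock_op_old(&sbi);
	pthread_mutex_unlock(&sbi.fs_lock[i]);
	i = lock_op_new(&sbi);
	pthread_mutex_unlock(&sbi.fs_lock[i]);

	/*
	 * Contended case, shown as pure index arithmetic so the demo can
	 * stay single-threaded: with all locks held and the counter at n,
	 * print which lock the k-th blocked waiter ends up on.
	 */
	printf("waiter  old  new\n");
	for (k = 0; k < 4; k++)
		printf("%6d  %3u  %3u\n", k,
		       n % NR_LOCKS, (n + k) % NR_LOCKS);
	return 0;
}

Note that the bare sbi->next_lock_num++ can race, but a lost or duplicated increment only skews which lock a waiter picks; it never affects correctness, which is why v2 of the patch could drop the spin_lock that v1 had added around it.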
Diffstat (limited to 'fs/f2fs/f2fs.h')
-rw-r--r--  fs/f2fs/f2fs.h  4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 608f0df..7fd99d8 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -544,15 +544,15 @@ static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
 
 static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
 {
-	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
+	unsigned char next_lock;
 	int i = 0;
 
 	for (; i < NR_GLOBAL_LOCKS; i++)
 		if (mutex_trylock(&sbi->fs_lock[i]))
 			return i;
 
+	next_lock = sbi->next_lock_num++ % NR_GLOBAL_LOCKS;
 	mutex_lock(&sbi->fs_lock[next_lock]);
-	sbi->next_lock_num++;
 	return next_lock;
 }
 