author		Alex Shi <alex.shi@intel.com>	2012-06-28 01:02:24 (GMT)
committer	H. Peter Anvin <hpa@zytor.com>	2012-06-28 02:29:14 (GMT)
commit		effee4b9b3b0aa5770bcd98de5f672b05b27703c (patch)
tree		b167657ec2ba05797b925a93e7e1b45222ac5ac3 /arch
parent		52aec3308db85f4e9f5c8b9f5dc4fbd0138c6fa4 (diff)
x86/tlb: do flush_tlb_kernel_range by 'invlpg'
This patch implements flush_tlb_kernel_range() with 'invlpg'. The performance cost and gain were analyzed in a previous patch (x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range) and in the testing reported at http://lkml.org/lkml/2012/6/21/10

The cost is mostly hidden by long kernel paths, but the gain is still quite clear: memory access in user applications can be 30+% faster while the kernel executes this function.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-10-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
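[Editor's note] To make the trade-off concrete, here is a minimal userspace sketch of the heuristic this patch adds. NR_TLB_ENTRIES and FLUSHALL_SHIFT below are illustrative stand-ins for the kernel's tlb_lld_4k[ENTRIES] and tlb_flushall_shift, which are detected at boot; the values chosen here are assumptions, not measured hardware parameters.

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* x86 uses 4 KiB base pages */
	#define NR_TLB_ENTRIES	512	/* assumed last-level TLB size (illustrative) */
	#define FLUSHALL_SHIFT	2	/* assumed tuning shift (illustrative) */

	/*
	 * Model of the patch's decision: if the range covers more pages
	 * than act_entries >> tlb_flushall_shift, one global flush is
	 * cheaper than invalidating page by page with 'invlpg'.
	 */
	static int should_flush_all(unsigned long start, unsigned long end)
	{
		unsigned long pages = (end - start) >> PAGE_SHIFT;

		return pages > (NR_TLB_ENTRIES >> FLUSHALL_SHIFT);
	}

	int main(void)
	{
		unsigned long base = 0xffffc90000000000UL;	/* example address */
		unsigned long npages[] = { 16, 128, 256 };

		for (unsigned i = 0; i < sizeof(npages) / sizeof(npages[0]); i++) {
			unsigned long end = base + (npages[i] << PAGE_SHIFT);

			printf("%3lu pages -> %s\n", npages[i],
			       should_flush_all(base, end) ? "flush_tlb_all()"
							   : "invlpg loop");
		}
		return 0;
	}

With these assumed numbers the break-even is 128 pages (512 KiB): 16 and 128 pages go through the invlpg loop, while 256 pages fall back to a global flush, since a range that large would displace most of the TLB's entries anyway. The kernel version additionally forces the global path when end == TLB_FLUSH_ALL or when tlb_flushall_shift is -1.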
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/tlbflush.h	13
-rw-r--r--	arch/x86/mm/tlb.c	30
2 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 621b959..b5a27bd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -123,6 +123,12 @@ static inline void reset_lazy_tlbstate(void)
 {
 }
 
+static inline void flush_tlb_kernel_range(unsigned long start,
+					   unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #else /* SMP */
 
 #include <asm/smp.h>
@@ -139,6 +145,7 @@ extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #define flush_tlb() flush_tlb_current_task()
@@ -168,10 +175,4 @@ static inline void reset_lazy_tlbstate(void)
 	native_flush_tlb_others(mask, mm, start, end)
 
 #endif
-static inline void flush_tlb_kernel_range(unsigned long start,
-					   unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2b5f506..613cd83 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* flush range by one by one 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* In modern CPU, last level tlb used for both data/ins */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* Balance as user space task's flush, a bit conservative */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 		   size_t count, loff_t *ppos)