-rw-r--r--  Documentation/cachetlb.txt      |  9
-rw-r--r--  arch/ia64/mm/tlb.c              |  2
-rw-r--r--  include/asm-parisc/tlbflush.h   |  3
3 files changed, 4 insertions, 10 deletions
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index e132fb1..7eb715e 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -49,9 +49,6 @@ changes occur:
page table operations such as what happens during
fork, and exec.
- Platform developers note that generic code will always
- invoke this interface without mm->page_table_lock held.
-
3) void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
@@ -72,9 +69,6 @@ changes occur:
call flush_tlb_page (see below) for each entry which may be
modified.
- Platform developers note that generic code will always
- invoke this interface with mm->page_table_lock held.
-
4) void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
This time we need to remove the PAGE_SIZE sized translation
@@ -93,9 +87,6 @@ changes occur:
This is used primarily during fault processing.
- Platform developers note that generic code will always
- invoke this interface with mm->page_table_lock held.
-
5) void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
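The three notes removed above all documented whether generic code would hold mm->page_table_lock around these flushes. Since spin_lock() implicitly disables preemption on preemptible kernels, dropping that guarantee means an architecture whose flush implementation must run to completion without being preempted can no longer lean on the caller's lock and has to disable preemption itself, which is what the two code hunks below add.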
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index c93e0f2..c79a9b9 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -158,10 +158,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
# ifdef CONFIG_SMP
platform_global_tlb_purge(mm, start, end, nbits);
# else
+ preempt_disable();
do {
ia64_ptcl(start, (nbits<<2));
start += (1UL << nbits);
} while (start < end);
+ preempt_enable();
# endif
ia64_srlz_i(); /* srlz.i implies srlz.d */
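To make the ia64 change concrete, here is a minimal sketch (not the file's literal code) of the non-SMP purge loop with the new preempt_disable()/preempt_enable() pair in place; the wrapper name and anything outside the quoted hunk are assumptions for illustration. ia64_ptcl() issues ptc.l, a local (current-CPU) TLB purge, and the patch brackets the loop so it runs without a preemption point in the middle, a property previously implied by the caller holding mm->page_table_lock:

	/* Sketch of the !CONFIG_SMP branch of ia64 flush_tlb_range() after this
	 * patch.  purge_local_range() is a hypothetical wrapper; the nbits/stride
	 * selection done earlier in the real function is omitted.  Kernel context
	 * assumed (preempt_disable(), ia64_ptcl(), ia64_srlz_i() as in the hunk).
	 */
	static void purge_local_range(unsigned long start, unsigned long end,
				      unsigned long nbits)
	{
		preempt_disable();	/* keep preemption off across the whole
					 * range, as page_table_lock used to */
		do {
			ia64_ptcl(start, (nbits << 2));	/* purge one stride */
			start += (1UL << nbits);
		} while (start < end);
		preempt_enable();

		ia64_srlz_i();		/* srlz.i implies srlz.d */
	}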
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index 84af4ab..e97aa8d 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -88,7 +88,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
-
+ preempt_disable();
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
@@ -102,6 +102,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
pdtlb(start);
start += PAGE_SIZE;
}
+ preempt_enable();
}
purge_tlb_end();
}
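The parisc hunk applies the same fix: mtsp() loads a space register with vma->vm_mm->context before the pdtlb/pitlb purges, and the new preempt_disable()/preempt_enable() pair keeps that sequence from being preempted partway through, as the now-unstated page_table_lock rule used to ensure implicitly. Presumably the concern is a context switch between the mtsp() and the purges letting another task reload that space register before the purge of this mm's translations has finished.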