author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-27 22:08:12 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-27 22:08:12 (GMT)
commit    ac6c9e2bed093c4b60e313674fb7aec4f264c3d4
tree      6eccadffcc04a7b66021ab3bd114cafcb52181c6 /arch/s390
parent    33c0022f0e687b0161a9bb84a5671df932551e3a
parent    1cf35d47712dd5dc4d62c6ce984f04ac6eab0408
Merge branch 'safe-dirty-tlb-flush'
This merges the patch to fix possible loss of the dirty bit on munmap() or madvise(DONTNEED). If there are concurrent writers on other CPUs that still have the unmapped/unneeded page in their TLBs, their writes to the page could be lost if a third CPU raced with the TLB flush and did a page_mkclean() before the page was fully written.

Admittedly, if you munmap() or madvise(DONTNEED) an area _while_ another thread is still busy writing to it, you deserve all the lost writes you could get. But we kernel people hold ourselves to higher quality standards than "crazy people deserve to lose", because, well, we've seen people do all kinds of crazy things. So let's get it right, just because we can, and then we don't have to worry about it.

* safe-dirty-tlb-flush:
  mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts
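The invariant behind the split is purely one of ordering: no page may be freed, or marked clean via page_mkclean(), while another CPU can still write to it through a stale TLB entry. The following is a minimal illustrative sketch in plain C, not kernel code from this merge; clear_ptes(), flush_remote_tlbs() and free_page_batch() are hypothetical stand-ins for the mmu_gather machinery:

/*
 * Illustrative sketch only: the helpers are hypothetical stand-ins,
 * not the real mmu_gather API.  It shows the ordering invariant
 * behind the tlb_flush_mmu() split.
 */
struct mmu_gather;

void clear_ptes(struct mmu_gather *tlb);        /* unhook pages from the page tables */
void flush_remote_tlbs(struct mmu_gather *tlb); /* shoot down stale TLB entries      */
void free_page_batch(struct mmu_gather *tlb);   /* hand pages back to the allocator  */

static void unmap_range(struct mmu_gather *tlb)
{
	clear_ptes(tlb);

	/*
	 * Until the shootdown completes, another CPU may still dirty
	 * the pages through a stale TLB entry, and a concurrent
	 * page_mkclean() could lose that write.  The flush therefore
	 * has to finish before the pages are cleaned or freed.
	 */
	flush_remote_tlbs(tlb);   /* the tlb_flush_mmu_tlbonly() half */

	free_page_batch(tlb);     /* the tlb_flush_mmu_free() half */
}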
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/tlb.h | 13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f..a25f09f 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
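Splitting the helpers lets generic code flush early and free late. Below is a condensed sketch of a caller, loosely modeled on the force_flush path the merged patch adds to zap_pte_range() in mm/memory.c; it is simplified and hedged, not verbatim kernel code:

/*
 * Hedged sketch, not verbatim kernel code: loosely modeled on the
 * force_flush handling in zap_pte_range() from the merged patch.
 * Assumes the tlb_flush_mmu_*() declarations from <asm/tlb.h> above.
 */
static void zap_range_sketch(struct mmu_gather *tlb, int force_flush)
{
	/*
	 * ... ptes are cleared here under the page table lock; if a
	 * dirty pte was among them, force_flush is set so the batch
	 * cannot outlive the lock unflushed ...
	 */

	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb); /* flush while still holding the lock */

	/* ... the page table lock is dropped here ... */

	if (force_flush)
		tlb_flush_mmu_free(tlb);    /* only now may the pages be freed */
}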