path: root/arch/powerpc/mm
author	David Gibson <david@gibson.dropbear.id.au>	2009-10-26 19:24:31 (GMT)
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-10-30 06:20:57 (GMT)
commit	f71dc176aa06359681c30ba6877ffccab6fba3a6 (patch)
tree	b72d97c2db323ab94399cd2633108c0a00a5da31 /arch/powerpc/mm
parent	8be8cf5b47f72096e42bf88cc3afff7a942a346c (diff)
download	linux-fsl-qoriq-f71dc176aa06359681c30ba6877ffccab6fba3a6.tar.xz
powerpc/mm: Make hpte_need_flush() correctly mask for multiple page sizes
Currently, hpte_need_flush() only correctly flushes the given address for normal pages. Callers for hugepages are required to mask the address themselves.

But hpte_need_flush() already looks up the page sizes for its own reasons, so this is a rather silly imposition on the callers. This patch alters it to mask based on the pagesize it has looked up itself, and removes the awkward masking code in the hugepage caller.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
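As a quick illustration of the masking this patch centralizes in hpte_need_flush(), here is a minimal standalone C sketch. It is not kernel code: the address and shift values below are made up, and the kernel's mmu_psize_defs[psize].shift lookup is replaced by a constant.

#include <stdio.h>

int main(void)
{
	/* Hypothetical effective address and page-size shift; in the
	 * kernel the shift would come from mmu_psize_defs[psize].shift. */
	unsigned long addr = 0x10f3c5a8UL;
	unsigned int shift = 24;		/* e.g. a 16M page */

	/* Same expression the patch adds to hpte_need_flush(): clear
	 * the low 'shift' bits to get the page's base address. */
	unsigned long masked = addr & ~((1UL << shift) - 1);

	printf("addr   = 0x%lx\n", addr);	/* 0x10f3c5a8 */
	printf("masked = 0x%lx\n", masked);	/* 0x10000000 */
	return 0;
}

With shift equal to PAGE_SHIFT this reduces to the old addr &= PAGE_MASK behaviour, which is why normal pages are unaffected by the change.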
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	6
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c	8
2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90df6ff..3d542a9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -445,11 +445,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		 * necessary anymore if we make hpte_need_flush() get the
 		 * page size from the slices
 		 */
-		unsigned int psize = get_slice_psize(mm, addr);
-		unsigned int shift = mmu_psize_to_shift(psize);
-		unsigned long sz = ((1UL) << shift);
-		struct hstate *hstate = size_to_hstate(sz);
-		pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
+		pte_update(mm, addr, ptep, ~0UL, 1);
 	}
 	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
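For comparison, the five lines removed above derived the same mask on the caller's side. Below is a standalone sketch of that now-redundant chain, with the kernel helpers stubbed out; the stub return values are invented and the *_stub names are mine, not kernel API.

#include <stdio.h>

/* Stand-ins for get_slice_psize()/mmu_psize_to_shift(); the real
 * helpers live in the powerpc mm code and these values are made up. */
static unsigned int get_slice_psize_stub(unsigned long addr) { (void)addr; return 2; }
static unsigned int mmu_psize_to_shift_stub(unsigned int psize) { (void)psize; return 24; }

int main(void)
{
	unsigned long addr = 0x10f3c5a8UL;
	unsigned int psize = get_slice_psize_stub(addr);
	unsigned int shift = mmu_psize_to_shift_stub(psize);
	unsigned long sz = 1UL << shift;
	unsigned long mask = ~(sz - 1);		/* what hstate->mask held */

	/* The old caller passed (addr & mask); after the patch it passes
	 * addr unmasked and hpte_need_flush() masks it internally. */
	printf("0x%lx -> 0x%lx\n", addr, addr & mask);
	return 0;
}

Identical result, but now computed once in hpte_need_flush() instead of in every hugepage caller.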
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 2b2f35f..282d930 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -53,11 +53,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 
 	i = batch->index;
 
-	/* We mask the address for the base page size. Huge pages will
-	 * have applied their own masking already
-	 */
-	addr &= PAGE_MASK;
-
 	/* Get page size (maybe move back to caller).
 	 *
 	 * NOTE: when using special 64K mappings in 4K environment like
@@ -75,6 +70,9 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	} else
 		psize = pte_pagesize_index(mm, addr, pte);
 
+	/* Mask the address for the correct page size */
+	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
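One detail worth checking in the hunk above: addr is now masked before user_segment_size() sees it. That is safe because a naturally aligned page can never straddle a segment boundary. A userspace sanity sketch, where the 256M segment size matches the hash MMU's usual SID shift and the page size and address are arbitrary examples:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long seg_shift = 28;	/* 256M segments */
	unsigned long page_shift = 24;	/* 16M huge page, for example */
	unsigned long addr = 0x10f3c5a8UL;
	unsigned long masked = addr & ~((1UL << page_shift) - 1);

	/* Masking down to the page base cannot move the address into a
	 * different segment, because page size <= segment size and both
	 * are power-of-two aligned. */
	assert((addr >> seg_shift) == (masked >> seg_shift));
	printf("segment index unchanged: 0x%lx\n", addr >> seg_shift);
	return 0;
}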