author    Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-03-09 23:05:37 (GMT)
committer Arnd Bergmann <arnd@klappe.arndb.de>	2007-03-09 23:07:50 (GMT)
commit    94b2a4393c500a620de90c3266d595926302e26b (patch)
tree      cde58177f430751b67a7aa47f3f89042e91357bf /arch/powerpc/mm
parent    50b520d4efbce45281f58112789470ec7965fd33 (diff)
[POWERPC] Fix spu SLB invalidations
The SPU code doesn't properly invalidate the SPUs' SLBs when necessary, for example when changing a segment size from the hugetlbfs code. In addition, it saves and restores the SLB content on context switches, which makes it harder to handle those invalidations properly.

This patch removes the saving & restoring for now; something more efficient might be found later on. It also adds a spu_flush_all_slbs(mm) that the core mm code can use to flush the SLBs of all SPEs that are running a given mm at the time of the flush.

In order to do that, it adds a spinlock to the list of all SPEs and moves some bits & pieces from spufs to spu_base.c.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
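As a rough illustration of the mechanism described above, here is a minimal C sketch of such a flush primitive: a global list of SPEs guarded by a spinlock, walked to invalidate the SLB of every SPE currently running the given mm. The struct layout, the names spu_full_list, spu_list_lock and spu_invalidate_slbs(), and the commented-out MMIO write are illustrative assumptions, not the verbatim spu_base.c change in this commit.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Illustrative SPE descriptor; the real struct spu carries much more. */
struct spu {
        struct list_head full_list;     /* link in the global SPE list */
        struct mm_struct *mm;           /* mm currently bound to this SPE, or NULL */
};

static LIST_HEAD(spu_full_list);        /* every SPE in the system */
static DEFINE_SPINLOCK(spu_list_lock);  /* guards spu_full_list */

/*
 * Hypothetical per-SPE invalidation: on real hardware this would be an
 * MMIO write that clears the SPE's SLB, so the next access faults and
 * reloads fresh entries for the (possibly resized) segment.
 */
static void spu_invalidate_slbs(struct spu *spu)
{
        /* e.g. out_be64(&spu->priv2->slb_invalidate_all_W, 0UL); */
}

/* Flush the SLBs of all SPEs running @mm at the time of the flush. */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_list_lock, flags);
}

Taking the lock with irqsave keeps the list walk safe from any calling context; the cost is acceptable because segment-size changes are rare compared to ordinary faults.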
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c   6
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c     4
2 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c0d2a69..3c7fe2c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -685,6 +685,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                                "non-cacheable mapping\n");
                         psize = mmu_vmalloc_psize = MMU_PAGE_4K;
                 }
+#ifdef CONFIG_SPU_BASE
+                spu_flush_all_slbs(mm);
+#endif
         }
         if (user_region) {
                 if (psize != get_paca()->context.user_psize) {
@@ -759,6 +762,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
                                 mmu_psize_defs[MMU_PAGE_4K].sllp;
                         get_paca()->context = mm->context;
                         slb_flush_and_rebolt();
+#ifdef CONFIG_SPU_BASE
+                        spu_flush_all_slbs(mm);
+#endif
                 }
         }
         if (mm->context.user_psize == MMU_PAGE_64K)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8c77c79..f6ffaaa 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -24,6 +24,7 @@
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/tlb.h>
+#include <asm/spu.h>
 
 #include <linux/sysctl.h>
@@ -513,6 +514,9 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
         if ((addr + len) > 0x100000000UL)
                 err = open_high_hpage_areas(current->mm,
                                             HTLB_AREA_MASK(addr, len));
+#ifdef CONFIG_SPU_BASE
+        spu_flush_all_slbs(current->mm);
+#endif
         if (err) {
                 printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
                         " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",