summaryrefslogtreecommitdiff
path: root/arch/powerpc/mm/mem.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--arch/powerpc/mm/mem.c36
1 file changed, 35 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0988a26..af90b90 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -34,6 +34,10 @@
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
+
+/* See hook_usdpaa_tlb1() */
+#include <linux/fsl_usdpaa.h>
+
#include <linux/slab.h>
#include <asm/pgalloc.h>
@@ -289,7 +293,9 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
- max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+ max_zone_pfns[ZONE_DMA] = min_t(phys_addr_t, top_of_ram,
+ 1ull << 31) >> PAGE_SHIFT;
+ max_zone_pfns[ZONE_NORMAL] = top_of_ram >> PAGE_SHIFT;
#endif
free_area_init_nodes(max_zone_pfns);
@@ -496,6 +502,32 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
}
EXPORT_SYMBOL(flush_icache_user_range);
+#ifdef CONFIG_FSL_USDPAA
+/*
+ * NB: this 'usdpaa' check+hack is to create TLB1 entries to cover the buffer
+ * memory used by run-to-completion UIO-based apps ("User-Space DataPath
+ * Acceleration Architecture"). It is expected to be phased out once HugeTLB
+ * support is hooked up with support for physical address conversion. The other
+ * half of this hack is in drivers/misc/fsl_usdpaa.c.
+ */
+static inline void hook_usdpaa_tlb1(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ unsigned long pfn = pte_pfn(*ptep); /* pfn of the just-faulted mapping */
+ u64 phys_addr; /* out: physical base of the matched USDPAA region */
+ u64 size; /* out: size of the matched region, in bytes */
+ int tlb_idx = usdpaa_test_fault(pfn, &phys_addr, &size); /* -1 => not a USDPAA region */
+ if (tlb_idx != -1) {
+ unsigned long va = address & ~(size - 1); /* NOTE(review): mask math assumes size is a power of two — confirm in fsl_usdpaa.c */
+ flush_tlb_mm(vma->vm_mm); /* drop any stale entries before installing the CAM entry */
+ settlbcam(tlb_idx, va, phys_addr, size, pte_val(*ptep),
+ mfspr(SPRN_PID)); /* tag the entry with the current PID */
+ }
+}
+#else
+#define hook_usdpaa_tlb1(a, b, c) do { } while (0) /* no-op when USDPAA is not configured */
+#endif
+
/*
* This is called at the end of handling a user page fault, when the
* fault has been handled by updating a PTE in the linux page tables.
@@ -529,6 +561,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
else if (trap != 0x300)
return;
hash_preload(vma->vm_mm, address, access, trap);
+#elif defined(CONFIG_FSL_USDPAA)
+ hook_usdpaa_tlb1(vma, address, ptep);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
&& defined(CONFIG_HUGETLB_PAGE)