author     Scott Wood <scottwood@freescale.com>	2013-10-29 20:03:43 (GMT)
committer  Scott Wood <scottwood@freescale.com>	2013-10-29 20:03:43 (GMT)
commit     ae60d5d27c429b13cf28a09ab8b9d30682433c5a (patch)
tree       16b67511ef66b0580c267a5438d1face3a3778e6 /arch/powerpc/mm
parent     b095c5c2577aeedce2db847fa117596628d4e7cb (diff)
parent     d0ebef8230e267ec47d4d4a65fe3262e2ebb8026 (diff)
download   linux-fsl-qoriq-ae60d5d27c429b13cf28a09ab8b9d30682433c5a.tar.xz
Revert to 3.8 (no rt, no stable)

This is a merge from rtmerge, which has been similarly reverted.

Conflicts:
	drivers/crypto/caam/caamalg.c
	drivers/misc/Makefile
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/fault.c                2
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c       22
-rw-r--r--	arch/powerpc/mm/mmu_context_hash64.c  11
-rw-r--r--	arch/powerpc/mm/numa.c                 2
-rw-r--r--	arch/powerpc/mm/pgtable_64.c           2
-rw-r--r--	arch/powerpc/mm/slb_low.S             50
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c           2
7 files changed, 44 insertions, 47 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 66fdd82..3a8489a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -259,7 +259,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (!mm || pagefault_disabled()) {
+ if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
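
The restored check is the stock 3.8 guard: the page-fault handler may need to take mmap_sem and sleep, so a fault arriving in atomic context (or with no mm to resolve against) has to be refused up front. The reverted -rt variant tested pagefault_disabled() instead, since on PREEMPT_RT in_atomic() does not reliably indicate whether a fault is permissible. A minimal compilable sketch of the restored control flow, with the kernel helpers stubbed so the fragment stands alone:

/* Sketch of the 3.8-era guard in do_page_fault(); in_atomic() and
 * user_mode() are stubs standing in for the kernel's definitions.
 */
#include <signal.h>
#include <stddef.h>

struct mm_struct;

extern int in_atomic(void);             /* stub: preempt-count test    */
extern int user_mode(const void *regs); /* stub: takes struct pt_regs  */

static int fault_guard(const void *regs, struct mm_struct *mm)
{
        if (in_atomic() || mm == NULL) {
                if (!user_mode(regs))
                        return SIGSEGV; /* kernel fault in atomic context */
                /* in_atomic() in user mode indicates a kernel bug */
        }
        return 0;                       /* safe to handle the fault */
}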
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 004630b..3a292be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -194,11 +194,6 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
- /*
- * If we hit a bad address return error.
- */
- if (!vsid)
- return -1;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
@@ -763,8 +758,6 @@ void __init early_init_mmu(void)
/* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
- else
- stab_initialize(get_paca()->stab_real);
}
#ifdef CONFIG_SMP
@@ -928,6 +921,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
+ if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
+ DBG_LOW(" out of pgtable range !\n");
+ return 1;
+ }
+
/* Get region & vsid */
switch (REGION_ID(ea)) {
case USER_REGION_ID:
@@ -958,11 +956,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
}
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
- /* Bad address. */
- if (!vsid) {
- DBG_LOW("Bad address!\n");
- return 1;
- }
/* Get pgdir */
pgdir = mm->pgd;
if (pgdir == NULL)
@@ -1132,8 +1125,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Get VSID */
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
- if (!vsid)
- return;
/* Hash doesn't like irqs */
local_irq_save(flags);
@@ -1226,9 +1217,6 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- /* Don't create HPTE entries for bad address */
- if (!vsid)
- return;
ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
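
Taken together, the hash_utils_64.c hunks revert bad-address handling to the 3.8 scheme: rather than having get_vsid() return 0 for an out-of-range address and checking for that at each caller, hash_page() rejects the effective address up front with a range test. A standalone sketch of that test, assuming the 3.8-era 64TB constants (top nibble of the address selects the region, 2^46 bytes of page-table coverage per region):

#include <stdint.h>

#define REGION_SHIFT   60
#define REGION_MASK    (0xfUL << REGION_SHIFT)  /* top nibble: region ID   */
#define PGTABLE_RANGE  (1UL << 46)              /* bytes covered per region */

/* The restored up-front test in hash_page(): everything below the
 * region bits must fall inside the page-table-covered range.
 */
static int ea_out_of_range(uint64_t ea)
{
        return (ea & ~REGION_MASK) >= PGTABLE_RANGE;
}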
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index d1d1b92..40bc5b0 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,6 +29,15 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 segments
+ * available for user mappings. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
+
int __init_new_context(void)
{
int index;
@@ -47,7 +56,7 @@ again:
else if (err)
return err;
- if (index > MAX_USER_CONTEXT) {
+ if (index > MAX_CONTEXT) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
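
The restored comment encodes the arithmetic behind MAX_CONTEXT: segments are 2^28 bytes, each context maps 2^46 bytes (so 46 - 28 = 18 ESID bits per context), and the proto-VSID space is 37 bits wide, leaving 37 - 18 = 19 bits of context ID; hence "19 == 37 + 28 - 46" and MAX_CONTEXT = 2^19 - 1. A small program working through the same numbers (the constants are the 3.8-era values, assumed here for illustration):

#include <stdio.h>

#define SID_SHIFT       28  /* 256MB segments                     */
#define USER_ESID_BITS  18  /* 46 - 28: ESID bits per context     */
#define CONTEXT_BITS    19  /* 37 - 18: proto-VSID bits left over */

#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)

int main(void)
{
        printf("bytes per context: 2^%d\n", SID_SHIFT + USER_ESID_BITS);
        printf("MAX_CONTEXT      : %#lx (%lu contexts)\n",
               MAX_CONTEXT, MAX_CONTEXT);
        return 0;
}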
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6a252c4..bba87ca 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -201,7 +201,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
- return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
+ return distance;
for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
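
This hunk restores the 3.8 fallback in __node_distance(): when firmware supplies no form1 affinity data there is nothing to compare, so every node pair reports LOCAL_DISTANCE, whereas the reverted code returned REMOTE_DISTANCE for a != b. A sketch of the restored fallback (LOCAL_DISTANCE and REMOTE_DISTANCE carry the conventional SLIT values):

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

static int form1_affinity; /* set when firmware provides form1 affinity */

static int node_distance_sketch(int a, int b)
{
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return distance; /* no data: treat all node pairs as local */

        /* otherwise walk distance_ref_points, as __node_distance() does */
        (void)a; (void)b;
        return distance;
}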
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 654258f..e212a27 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
#endif
#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif
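
The renamed macro feeds a build-time bound: one context's worth of user ESIDs can map at most 2^(USER_ESID_BITS + SID_SHIFT) bytes, so TASK_SIZE_USER64 must not exceed it (2^46 = 64TB with the values assumed above). The same check expressed as a C11 static assertion, as a sketch only; the kernel uses the #if/#error form shown in the hunk:

#include <assert.h>

#define SID_SHIFT        28
#define USER_ESID_BITS   18
#define TASK_SIZE_USER64 (1UL << 46) /* illustrative: the 64TB configuration */

static_assert(TASK_SIZE_USER64 <= (1UL << (USER_ESID_BITS + SID_SHIFT)),
              "TASK_SIZE_USER64 exceeds user VSID range");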
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 17aa6df..1a16ca2 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,15 +31,10 @@
* No other registers are examined or changed.
*/
_GLOBAL(slb_allocate_realmode)
- /*
- * check for bad kernel/user address
- * (ea & ~REGION_MASK) >= PGTABLE_RANGE
- */
- rldicr. r9,r3,4,(63 - 46 - 4)
- bne- 8f
+ /* r3 = faulting address */
srdi r9,r3,60 /* get region */
- srdi r10,r3,SID_SHIFT /* get esid */
+ srdi r10,r3,28 /* get esid */
cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -61,14 +56,12 @@ _GLOBAL(slb_allocate_realmode)
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
+ li r9,0x1
/*
- * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
- * r9 = region id.
+ * for 1T we shift 12 bits more. slb_finish_load_1T will do
+ * the necessary adjustment
*/
- addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
- addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
-
-
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -98,19 +91,24 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
_GLOBAL(slb_miss_kernel_load_io)
li r11,0
6:
+ li r9,0x1
/*
- * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
- * r9 = region id.
+ * for 1T we shift 12 bits more. slb_finish_load_1T will do
+ * the necessary adjustment
*/
- addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
- addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
-
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-0:
+0: /* user address: proto-VSID = context << 15 | ESID. First check
+ * if the address is within the boundaries of the user region
+ */
+ srdi. r9,r10,USER_ESID_BITS
+ bne- 8f /* invalid ea bits set */
+
+
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
@@ -166,13 +164,15 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+ rldimi r10,r9,USER_ESID_BITS,0
+BEGIN_FTR_SECTION
bge slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
li r10,0 /* BAD_VSID */
- li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load
@@ -221,6 +221,8 @@ _GLOBAL(slb_allocate_user)
/* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13)
+ rldimi r10,r9,USER_ESID_BITS,0
+
/* fall through slb_finish_load */
#endif /* __DISABLED__ */
@@ -229,10 +231,9 @@ _GLOBAL(slb_allocate_user)
/*
* Finish loading of an SLB entry and return
*
- * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
- rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M)
/*
* bits above VSID_BITS_256M need to be ignored from r10
@@ -297,11 +298,10 @@ _GLOBAL(slb_compare_rr_to_size)
/*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
*
- * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
*/
slb_finish_load_1T:
- srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
- rldimi r10,r9,ESID_BITS_1T,0
+ srdi r10,r10,40-28 /* get 1T ESID */
ASM_VSID_SCRAMBLE(r10,r9,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10
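
All of the slb_low.S hunks revert to the 3.8 proto-VSID layout, in which the context ID is merged into the ESID before scrambling: rldimi r10,r9,USER_ESID_BITS,0 deposits the context above the low ESID bits (the earlier srdi./bne- pair guarantees the ESID fits), kernel regions use the fixed context 1 placed at bit CONTEXT_BITS + USER_ESID_BITS, and the 1T path simply shifts the 256MB proto-VSID right by 40 - 28 = 12 bits. A C rendering of that register arithmetic (constants as assumed above; the function names are illustrative, not kernel symbols):

#define SID_SHIFT       28 /* 256MB segment */
#define SID_SHIFT_1T    40 /* 1TB segment   */
#define USER_ESID_BITS  18

/* Equivalent of: srdi r10,r3,28 ; rldimi r10,r9,USER_ESID_BITS,0 */
static unsigned long proto_vsid_256m(unsigned long context, unsigned long ea)
{
        return (context << USER_ESID_BITS) | (ea >> SID_SHIFT);
}

/* slb_finish_load_1T: the same value shifted right by 40 - 28 = 12,
 * i.e. (context << 6) | (ea >> 40)
 */
static unsigned long proto_vsid_1t(unsigned long context, unsigned long ea)
{
        return proto_vsid_256m(context, ea) >> (SID_SHIFT_1T - SID_SHIFT);
}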
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 023ec8a..0d82ef5 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
vsid = get_vsid(mm->context.id, addr, ssize);
+ WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
- WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);