path: root/arch/powerpc/mm
author	Scott Wood <scottwood@freescale.com>	2014-04-08 01:00:49 (GMT)
committer	Scott Wood <scottwood@freescale.com>	2014-04-08 19:58:35 (GMT)
commit	47d2261a3fa71cde24263559a4219a25e50d8c89 (patch)
tree	28774d5b330ccf1b777a3af222d8356918328013 /arch/powerpc/mm
parent	fb7f27080adc65cd5f341bdf56a1d0c14f316c1b (diff)
parent	5fb9d37f27351e42f002e372074249f92cbdf815 (diff)
download	linux-fsl-qoriq-47d2261a3fa71cde24263559a4219a25e50d8c89.tar.xz
Merge branch 'merge' into sdk-v1.6.x
This reverts v3.13-rc3+ (78fd82238d0e5716) to v3.12, except for commits
which I noticed which appear relevant to the SDK.

Signed-off-by: Scott Wood <scottwood@freescale.com>

Conflicts:
	arch/powerpc/include/asm/kvm_host.h
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
	arch/powerpc/kvm/book3s_interrupts.S
	arch/powerpc/kvm/e500.c
	arch/powerpc/kvm/e500mc.c
	arch/powerpc/sysdev/fsl_soc.h
	drivers/Kconfig
	drivers/cpufreq/ppc-corenet-cpufreq.c
	drivers/dma/fsldma.c
	drivers/dma/s3c24xx-dma.c
	drivers/misc/Makefile
	drivers/mmc/host/sdhci-of-esdhc.c
	drivers/mtd/devices/m25p80.c
	drivers/net/ethernet/freescale/gianfar.h
	drivers/platform/Kconfig
	drivers/platform/Makefile
	drivers/spi/spi-fsl-espi.c
	include/crypto/algapi.h
	include/linux/netdev_features.h
	include/linux/skbuff.h
	include/net/ip.h
	net/core/ethtool.c
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/dma-noncoherent.c	4
-rw-r--r--	arch/powerpc/mm/gup.c	5
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	46
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	38
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	2
-rw-r--r--	arch/powerpc/mm/init_32.c	5
-rw-r--r--	arch/powerpc/mm/init_64.c	51
-rw-r--r--	arch/powerpc/mm/numa.c	11
-rw-r--r--	arch/powerpc/mm/pgtable.c	19
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	5
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	7
-rw-r--r--	arch/powerpc/mm/slice.c	2
12 files changed, 75 insertions(+), 120 deletions(-)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 7b6c107..6747eec 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -287,7 +287,9 @@ void __dma_free_coherent(size_t size, void *vaddr)
pte_clear(&init_mm, addr, ptep);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
- __free_reserved_page(page);
+
+ ClearPageReserved(page);
+ __free_page(page);
}
}
addr += PAGE_SIZE;
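
Note: the hunk above reverts __dma_free_coherent() to open-coding the page release. For context, a minimal kernel-style sketch of the restored v3.12 sequence (free_coherent_page() is a hypothetical name; the v3.13 helper __free_reserved_page() from <linux/mm.h> wraps the same steps plus a refcount reset):

	#include <linux/mm.h>

	/* Hypothetical wrapper mirroring the reverted sequence: a page marked
	 * reserved when the coherent mapping was created is unmarked and
	 * returned to the page allocator. */
	static void free_coherent_page(struct page *page)
	{
		ClearPageReserved(page);	/* undo the reservation taken at map time */
		__free_page(page);		/* drop the last reference, freeing the page */
	}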
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index c5f734e..6936547 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -123,7 +123,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
- unsigned long flags;
pgd_t *pgdp;
int nr = 0;
@@ -157,7 +156,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* So long as we atomically load page table pointers versus teardown,
* we can follow the address down to the page and take a ref on it.
*/
- local_irq_save(flags);
+ local_irq_disable();
pgdp = pgd_offset(mm, addr);
do {
@@ -180,7 +179,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
break;
} while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
+ local_irq_enable();
return nr;
}
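
Note: the removed local_irq_save()/local_irq_restore() pair preserves the caller's interrupt state, while the restored local_irq_disable()/local_irq_enable() pair unconditionally re-enables interrupts on exit. A sketch contrasting the two patterns (function names are illustrative):

	#include <linux/irqflags.h>

	static void walk_with_save(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* records whether IRQs were already off */
		/* ... lockless page-table walk, speculative page refs ... */
		local_irq_restore(flags);	/* puts the IRQ state back exactly as found */
	}

	static void walk_with_disable(void)
	{
		local_irq_disable();		/* assumes IRQs were on when called */
		/* ... lockless page-table walk, speculative page refs ... */
		local_irq_enable();		/* unconditionally re-enables IRQs */
	}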
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3ea26c2..c33d939 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -35,11 +35,7 @@
#define DBG_LOW(fmt...)
#endif
-#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
-#else
-#define HPTE_LOCK_BIT (56+3)
-#endif
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
@@ -176,7 +172,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
static inline void native_lock_hpte(struct hash_pte *hptep)
{
- unsigned long *word = (unsigned long *)&hptep->v;
+ unsigned long *word = &hptep->v;
while (1) {
if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
@@ -188,7 +184,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
- unsigned long *word = (unsigned long *)&hptep->v;
+ unsigned long *word = &hptep->v;
clear_bit_unlock(HPTE_LOCK_BIT, word);
}
@@ -208,10 +204,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
- if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
+ if (! (hptep->v & HPTE_V_VALID)) {
/* retry with lock held */
native_lock_hpte(hptep);
- if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
+ if (! (hptep->v & HPTE_V_VALID))
break;
native_unlock_hpte(hptep);
}
@@ -230,14 +226,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
i, hpte_v, hpte_r);
}
- hptep->r = cpu_to_be64(hpte_r);
+ hptep->r = hpte_r;
/* Guarantee the second dword is visible before the valid bit */
eieio();
/*
* Now set the first dword including the valid bit
* NOTE: this also unlocks the hpte
*/
- hptep->v = cpu_to_be64(hpte_v);
+ hptep->v = hpte_v;
__asm__ __volatile__ ("ptesync" : : : "memory");
@@ -258,12 +254,12 @@ static long native_hpte_remove(unsigned long hpte_group)
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + hpte_group + slot_offset;
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
/* retry with lock held */
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
if ((hpte_v & HPTE_V_VALID)
&& !(hpte_v & HPTE_V_BOLTED))
break;
@@ -298,7 +294,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
* a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -312,8 +308,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
} else {
DBG_LOW(" -> hit\n");
/* Update the HPTE */
- hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
+ hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
}
native_unlock_hpte(hptep);
@@ -338,7 +334,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/* HPTE matches */
@@ -373,9 +369,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
hptep = htab_address + slot;
/* Update the HPTE */
- hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
- ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N)));
+ hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N));
/*
* Ensure it is out of the tlb too. Bolted entries base and
* actual page size will be same.
@@ -397,7 +392,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -463,7 +458,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -524,12 +519,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
unsigned long avpn, pteg, vpi;
- unsigned long hpte_v = be64_to_cpu(hpte->v);
- unsigned long hpte_r = be64_to_cpu(hpte->r);
+ unsigned long hpte_v = hpte->v;
unsigned long vsid, seg_off;
int size, a_size, shift;
/* Look at the 8 bit LP value */
- unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
if (!(hpte_v & HPTE_V_LARGE)) {
size = MMU_PAGE_4K;
@@ -618,7 +612,7 @@ static void native_hpte_clear(void)
* running, right? and for crash dump, we probably
* don't want to wait for a maybe bad cpu.
*/
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
/*
* Call __tlbie() here rather than tlbie() since we
@@ -670,7 +664,7 @@ static void native_flush_hash_range(unsigned long number, int local)
hptep = htab_address + slot;
want_v = hpte_encode_avpn(vpn, psize, ssize);
native_lock_hpte(hptep);
- hpte_v = be64_to_cpu(hptep->v);
+ hpte_v = hptep->v;
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
!(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
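
Note: the common thread in the hash_native_64.c hunks is dropping be64_to_cpu()/cpu_to_be64(): HPTEs are big-endian in memory, so a little-endian kernel must byte-swap on every access, while the restored big-endian-only code reads the words directly. A runnable user-space analogue using glibc's <endian.h>, where be64toh() plays the role of be64_to_cpu() (the flag value here is illustrative, not the kernel's mask):

	#include <stdio.h>
	#include <stdint.h>
	#include <endian.h>

	#define DEMO_HPTE_V_VALID 0x1ULL	/* illustrative valid bit */

	int main(void)
	{
		uint64_t hpte_v = htobe64(DEMO_HPTE_V_VALID);	/* as laid out in memory */

		/* On big-endian hosts the swap is a no-op; on little-endian it is
		 * required before any flag test, which is what the removed
		 * be64_to_cpu() calls provided. */
		printf("valid, raw read  : %d\n", !!(hpte_v & DEMO_HPTE_V_VALID));
		printf("valid, after swap: %d\n", !!(be64toh(hpte_v) & DEMO_HPTE_V_VALID));
		return 0;
	}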
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6176b3c..bde8b55 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -251,18 +251,19 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be32 *prop;
+ u32 *prop;
unsigned long size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
+ &size);
if (prop == NULL)
return 0;
for (; size >= 4; size -= 4, ++prop) {
- if (be32_to_cpu(prop[0]) == 40) {
+ if (prop[0] == 40) {
DBG("1T segment support detected\n");
cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
return 1;
@@ -306,22 +307,23 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be32 *prop;
+ u32 *prop;
unsigned long size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
+ prop = (u32 *)of_get_flat_dt_prop(node,
+ "ibm,segment-page-sizes", &size);
if (prop != NULL) {
pr_info("Page sizes from device-tree:\n");
size /= 4;
cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
while(size > 0) {
- unsigned int base_shift = be32_to_cpu(prop[0]);
- unsigned int slbenc = be32_to_cpu(prop[1]);
- unsigned int lpnum = be32_to_cpu(prop[2]);
+ unsigned int base_shift = prop[0];
+ unsigned int slbenc = prop[1];
+ unsigned int lpnum = prop[2];
struct mmu_psize_def *def;
int idx, base_idx;
@@ -354,8 +356,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
def->tlbiel = 0;
while (size > 0 && lpnum) {
- unsigned int shift = be32_to_cpu(prop[0]);
- int penc = be32_to_cpu(prop[1]);
+ unsigned int shift = prop[0];
+ int penc = prop[1];
prop += 2; size -= 2;
lpnum--;
@@ -388,8 +390,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
const char *uname, int depth,
void *data) {
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be64 *addr_prop;
- __be32 *page_count_prop;
+ unsigned long *addr_prop;
+ u32 *page_count_prop;
unsigned int expected_pages;
long unsigned int phys_addr;
long unsigned int block_size;
@@ -403,12 +405,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
if (page_count_prop == NULL)
return 0;
- expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
+ expected_pages = (1 << page_count_prop[0]);
addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
if (addr_prop == NULL)
return 0;
- phys_addr = be64_to_cpu(addr_prop[0]);
- block_size = be64_to_cpu(addr_prop[1]);
+ phys_addr = addr_prop[0];
+ block_size = addr_prop[1];
if (block_size != (16 * GB))
return 0;
printk(KERN_INFO "Huge page(16GB) memory: "
@@ -532,16 +534,16 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
void *data)
{
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be32 *prop;
+ u32 *prop;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
if (prop != NULL) {
/* pft_size[0] is the NUMA CEC cookie */
- ppc64_pft_size = be32_to_cpu(prop[1]);
+ ppc64_pft_size = prop[1];
return 1;
}
return 0;
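
Note: these hunks revert the same endianness discipline for flattened-device-tree data: FDT cells are always stored big-endian, and the __be32 typing being removed is what lets sparse flag a missing conversion at build time. A runnable user-space analogue of the segment-size check, with be32toh() standing in for be32_to_cpu() (40 is the 1T-segment encoding tested in the first hunk):

	#include <stdio.h>
	#include <stdint.h>
	#include <endian.h>

	int main(void)
	{
		uint32_t cell = htobe32(40);	/* a property cell as stored in the FDT blob */

		if (be32toh(cell) == 40)	/* swap before comparing, as the removed code did */
			puts("1T segment support detected");
		else
			puts("no 1T segment encoding in this cell");
		return 0;
	}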
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90bb6d9..d67db4b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -633,6 +633,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
/*
* This function frees user-level page tables of a process.
+ *
+ * Must be called with pagetable lock held.
*/
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index cff59f1..d47d3da 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -213,12 +213,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
*/
BUG_ON(first_memblock_base != 0);
-#ifdef CONFIG_PIN_TLB
- /* 8xx can only access 24MB at the moment */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
-#else
/* 8xx can only access 8MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
-#endif
}
#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index e3734ed..8ed035d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -304,54 +304,5 @@ void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long size)
{
}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-/*
- * We do not have access to the sparsemem vmemmap, so we fallback to
- * walking the list of sparsemem blocks which we already maintain for
- * the sake of crashdump. In the long run, we might want to maintain
- * a tree if performance of that linear walk becomes a problem.
- *
- * realmode_pfn_to_page functions can fail due to:
- * 1) As real sparsemem blocks do not lay in RAM continously (they
- * are in virtual address space which is not available in the real mode),
- * the requested page struct can be split between blocks so get_page/put_page
- * may fail.
- * 2) When huge pages are used, the get_page/put_page API will fail
- * in real mode as the linked addresses in the page struct are virtual
- * too.
- */
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct vmemmap_backing *vmem_back;
- struct page *page;
- unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
- unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
-
- for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
- if (pg_va < vmem_back->virt_addr)
- continue;
-
- /* Check that page struct is not split between real pages */
- if ((pg_va + sizeof(struct page)) >
- (vmem_back->virt_addr + page_size))
- return NULL;
-
- page = (struct page *) (vmem_back->phys + pg_va -
- vmem_back->virt_addr);
- return page;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#elif defined(CONFIG_FLATMEM)
-
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
- return page;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 078d3e0..c916127 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -195,7 +195,7 @@ static const __be32 *of_get_usable_memory(struct device_node *memory)
u32 len;
prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
if (!prop || len < sizeof(unsigned int))
- return NULL;
+ return 0;
return prop;
}
@@ -938,7 +938,8 @@ static void __init mark_reserved_regions_for_nid(int nid)
unsigned long start_pfn = physbase >> PAGE_SHIFT;
unsigned long end_pfn = PFN_UP(physbase + size);
struct node_active_region node_ar;
- unsigned long node_end_pfn = pgdat_end_pfn(node);
+ unsigned long node_end_pfn = node->node_start_pfn +
+ node->node_spanned_pages;
/*
* Check to make sure that this memblock.reserved area is
@@ -1153,7 +1154,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
* represented in the device tree as a node (i.e. memory@XXXX) for
* each memblock.
*/
-static int hot_add_node_scn_to_nid(unsigned long scn_addr)
+int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory;
int nid = -1;
@@ -1234,7 +1235,7 @@ static u64 hot_add_drconf_memory_max(void)
struct device_node *memory = NULL;
unsigned int drconf_cell_cnt = 0;
u64 lmb_size = 0;
- const __be32 *dm = NULL;
+ const __be32 *dm = 0;
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory) {
@@ -1534,7 +1535,7 @@ static void topology_work_fn(struct work_struct *work)
}
static DECLARE_WORK(topology_work, topology_work_fn);
-static void topology_schedule_update(void)
+void topology_schedule_update(void)
{
schedule_work(&topology_work);
}
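
Note: the pgdat_end_pfn() call removed above is only a convenience wrapper; the open-coded expression the revert restores computes the same value. The v3.13 helper in <linux/mmzone.h> is defined along these lines:

	static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
	{
		return pgdat->node_start_pfn + pgdat->node_spanned_pages;
	}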
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 841e0d0..edda589 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -32,6 +32,8 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
+#include "mmu_decl.h"
+
static inline int is_exec_fault(void)
{
return current->thread.regs && TRAP(current->thread.regs) == 0x400;
@@ -70,7 +72,7 @@ struct page * maybe_pte_to_page(pte_t pte)
* support falls into the same category.
*/
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -79,6 +81,17 @@ static pte_t set_pte_filter(pte_t pte)
if (!pg)
return pte;
if (!test_bit(PG_arch_1, &pg->flags)) {
+#ifdef CONFIG_8xx
+ /* On 8xx, cache control instructions (particularly
+ * "dcbst" from flush_dcache_icache) fault as write
+ * operation if there is an unpopulated TLB entry
+ * for the address in question. To workaround that,
+ * we invalidate the TLB here, thus avoiding dcbst
+ * misbehaviour.
+ */
+ /* 8xx doesn't care about PID, size or ind args */
+ _tlbil_va(addr, 0, 0, 0);
+#endif /* CONFIG_8xx */
flush_dcache_icache_page(pg);
set_bit(PG_arch_1, &pg->flags);
}
@@ -98,7 +111,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
*/
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
struct page *pg;
@@ -180,7 +193,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
* this context might not have been activated yet when this
* is called.
*/
- pte = set_pte_filter(pte);
+ pte = set_pte_filter(pte, addr);
/* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5b96017..6c856fb 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -121,10 +121,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
ptepage = alloc_pages(flags, 0);
if (!ptepage)
return NULL;
- if (!pgtable_page_ctor(ptepage)) {
- __free_page(ptepage);
- return NULL;
- }
+ pgtable_page_ctor(ptepage);
return ptepage;
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 9d95786..536eec72 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -378,10 +378,6 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
__GFP_REPEAT | __GFP_ZERO);
if (!page)
return NULL;
- if (!kernel && !pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
ret = page_address(page);
spin_lock(&mm->page_table_lock);
@@ -396,6 +392,9 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
}
spin_unlock(&mm->page_table_lock);
+ if (!kernel)
+ pgtable_page_ctor(page);
+
return (pte_t *)ret;
}
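
Note: both the pgtable_32.c and pgtable_64.c hunks drop the error check on pgtable_page_ctor(). In v3.13 the constructor can fail, since a split page-table lock that does not fit in struct page is allocated dynamically, so callers must unwind on failure. A kernel-style sketch of the checked pattern being reverted (alloc_pte_page() is a hypothetical name):

	#include <linux/mm.h>
	#include <linux/gfp.h>

	static struct page *alloc_pte_page(gfp_t gfp)
	{
		struct page *page = alloc_page(gfp);

		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {	/* may fail allocating the ptlock */
			__free_page(page);
			return NULL;
		}
		return page;			/* constructed and ready for use */
	}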
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 7ce9cf3..3e99c14 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -258,7 +258,7 @@ static bool slice_scan_available(unsigned long addr,
slice = GET_HIGH_SLICE_INDEX(addr);
*boundary_addr = (slice + end) ?
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
- return !!(available.high_slices & (1ul << slice));
+ return !!(available.high_slices & (1u << slice));
}
}
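
Note: the 1ul -> 1u change in the last hunk deserves a closer look: the high-slice mask is 64 bits wide and GET_HIGH_SLICE_INDEX() can exceed 31, while shifting a 32-bit constant by 32 or more is undefined behaviour in C, which is what the v3.13 fix (1ul) addressed. A runnable demonstration of the well-defined 64-bit form:

	#include <stdio.h>

	int main(void)
	{
		int slice = 40;	/* a plausible high-slice index above 31 */

		/* 1u << 40 would be undefined (the shift meets or exceeds the
		 * 32-bit constant's width); widening to unsigned long first
		 * yields the intended mask on 64-bit powerpc. */
		printf("1ul << %d = %#lx\n", slice, 1ul << slice);
		return 0;
	}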