Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Makefile      |   9
-rw-r--r--  arch/sh/mm/cache-shx3.c  |  44
-rw-r--r--  arch/sh/mm/cache.c       |   7
-rw-r--r--  arch/sh/mm/fault_32.c    |  14
-rw-r--r--  arch/sh/mm/init.c        | 173
-rw-r--r--  arch/sh/mm/numa.c        |  38
-rw-r--r--  arch/sh/mm/pmb.c         |   4
-rw-r--r--  arch/sh/mm/tlb-debugfs.c | 165
-rw-r--r--  arch/sh/mm/tlbflush_64.c |  20
9 files changed, 402 insertions, 72 deletions
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3dc8a8a..53f7c68 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,6 +10,7 @@ cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
@@ -18,13 +19,14 @@ mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
obj-y += $(mmu-y)
-obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
-ifdef CONFIG_DEBUG_FS
-obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
+debugfs-y := asids-debugfs.o
+ifndef CONFIG_CACHE_OFF
+debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
endif
ifdef CONFIG_MMU
+debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
@@ -32,6 +34,7 @@ tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
+obj-$(CONFIG_DEBUG_FS) += $(debugfs-y)
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
new file mode 100644
index 0000000..c0adbee
--- /dev/null
+++ b/arch/sh/mm/cache-shx3.c
@@ -0,0 +1,44 @@
+/*
+ * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/cache.h>
+
+#define CCR_CACHE_SNM 0x40000 /* Hardware-assisted synonym avoidance */
+#define CCR_CACHE_IBE 0x1000000 /* ICBI broadcast */
+
+void __init shx3_cache_init(void)
+{
+ unsigned int ccr;
+
+ ccr = __raw_readl(CCR);
+
+ /*
+ * If we've got cache aliases, resolve them in hardware.
+ */
+ if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
+ ccr |= CCR_CACHE_SNM;
+
+ boot_cpu_data.icache.n_aliases = 0;
+ boot_cpu_data.dcache.n_aliases = 0;
+
+ pr_info("Enabling hardware synonym avoidance\n");
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * Broadcast I-cache block invalidations by default.
+ */
+ ccr |= CCR_CACHE_IBE;
+#endif
+
+ writel_uncached(ccr, CCR);
+}
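
Note: the n_aliases counts cleared above are normally derived from the cache
geometry at probe time. A minimal sketch of that computation, in the spirit of
the compute_alias() helper in arch/sh/mm/cache.c (field names assumed to match
struct cache_info in <asm/cache.h>):

	struct cache_info {
		unsigned int sets;		/* number of cache sets */
		unsigned int entry_shift;	/* log2 of the entry size */
		unsigned long alias_mask;
		unsigned int n_aliases;
	};

	static void compute_alias(struct cache_info *c)
	{
		/*
		 * Index bits that lie above the page offset select the
		 * synonym; with CCR_CACHE_SNM set the hardware resolves
		 * these itself, so both counts can be forced to zero.
		 */
		c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
		c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
	}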
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 0f4095d..ba401d1 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -334,6 +334,13 @@ void __init cpu_cache_init(void)
extern void __weak sh4_cache_init(void);
sh4_cache_init();
+
+ if ((boot_cpu_data.type == CPU_SH7786) ||
+ (boot_cpu_data.type == CPU_SHX3)) {
+ extern void __weak shx3_cache_init(void);
+
+ shx3_cache_init();
+ }
}
if (boot_cpu_data.family == CPU_FAMILY_SH5) {
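
Note: the extern __weak declaration above lets the call site link even on
configurations where cache-shx3.o is never built; the Kconfig dependencies
guarantee a strong definition exists whenever the SH7786/SH-X3 branch can be
taken at runtime. The general shape of the pattern, with hypothetical names:

	/* Weak reference: resolves to the strong definition when one is
	 * linked in, and to address zero otherwise. */
	extern void __weak optional_cache_init(void);

	static void cache_init_late(void)
	{
		/* Guard for configurations that omit the definition. */
		if (optional_cache_init)
			optional_cache_init();
	}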
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 8bf79e3..d4c34d7 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -200,7 +200,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -290,15 +289,10 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
up_read(&mm->mmap_sem);
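
Note: the rewritten OOM tail stops special-casing init and no longer kills the
faulting task from the fault handler. Kernel-mode faults still fall through to
the exception-fixup/oops path; user-mode faults are handed to the core VM,
which runs the OOM killer and selects a victim globally. Annotated, the new
tail reads:

	out_of_memory:
		up_read(&mm->mmap_sem);		/* drop mmap_sem before sleeping */
		if (!user_mode(regs))
			goto no_context;	/* kernel fault: fixup or oops */
		pagefault_out_of_memory();	/* core VM picks the OOM victim */
		return;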
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index c505de6..46f84de 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -2,7 +2,7 @@
* linux/arch/sh/mm/init.c
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
*
* Based on linux/arch/i386/mm/init.c:
* Copyright (C) 1995 Linus Torvalds
@@ -16,17 +16,31 @@
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
+#include <linux/lmb.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
+#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
+void __init generic_mem_init(void)
+{
+ lmb_add(__MEMORY_START, __MEMORY_SIZE);
+}
+
+void __init __weak plat_mem_setup(void)
+{
+ /* Nothing to see here, move along. */
+}
+
#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
@@ -152,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
}
#endif /* CONFIG_MMU */
-/*
- * paging_init() sets up the page tables
- */
+void __init allocate_pgdat(unsigned int nid)
+{
+ unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ unsigned long phys;
+#endif
+
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ phys = __lmb_alloc_base(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+ /* Retry with all of system memory */
+ if (!phys)
+ phys = __lmb_alloc_base(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, lmb_end_of_DRAM());
+ if (!phys)
+ panic("Can't allocate pgdat for node %d\n", nid);
+
+ NODE_DATA(nid) = __va(phys);
+ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+ NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+ unsigned long total_pages, paddr;
+ unsigned long end_pfn;
+ struct pglist_data *p;
+ int i;
+
+ p = NODE_DATA(nid);
+
+ /* Nothing to do.. */
+ if (!p->node_spanned_pages)
+ return;
+
+ end_pfn = p->node_start_pfn + p->node_spanned_pages;
+
+ total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+ paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+ if (!paddr)
+ panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+ init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+ free_bootmem_with_active_regions(nid, end_pfn);
+
+ /*
+ * XXX Handle initial reservations for the system memory node
+ * only for the moment, we'll refactor this later for handling
+ * reservations in other nodes.
+ */
+ if (nid == 0) {
+ /* Reserve the sections we're already using. */
+ for (i = 0; i < lmb.reserved.cnt; i++)
+ reserve_bootmem(lmb.reserved.region[i].base,
+ lmb_size_bytes(&lmb.reserved, i),
+ BOOTMEM_DEFAULT);
+ }
+
+ sparse_memory_present_with_active_regions(nid);
+}
+
+static void __init do_init_bootmem(void)
+{
+ int i;
+
+ /* Add active regions with valid PFNs. */
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long start_pfn, end_pfn;
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ __add_active_range(0, start_pfn, end_pfn);
+ }
+
+ /* All of system RAM sits in node 0 for the non-NUMA case */
+ allocate_pgdat(0);
+ node_set_online(0);
+
+ plat_mem_setup();
+
+ for_each_online_node(i)
+ bootmem_init_one_node(i);
+
+ sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+ unsigned long start_pfn;
+
+ /*
+ * Partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(_end));
+
+ /*
+ * Reserve the kernel text and reserve the bootmem bitmap. We do
+ * this in two steps (first step was init_bootmem()), because
+ * this catches the (definitely buggy) case of us accidentally
+ * initializing the bootmem allocator with an invalid RAM area.
+ */
+ lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+ (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+ (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+ /*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+ /*
+ * Handle additional early reservations
+ */
+ check_for_initrd();
+ reserve_crashkernel();
+}
+
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long vaddr, end;
int nid;
+ lmb_init();
+
+ sh_mv.mv_mem_init();
+
+ early_reserve_mem();
+
+ lmb_enforce_memory_limit(memory_limit);
+ lmb_analyze();
+
+ lmb_dump_all();
+
+ /*
+ * Determine low and high memory ranges:
+ */
+ max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+ nodes_clear(node_online_map);
+
+ memory_start = (unsigned long)__va(__MEMORY_START);
+ memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+
+ uncached_init();
+ pmb_init();
+ do_init_bootmem();
+ ioremap_fixed_init();
+
/* We don't need to map the kernel through the TLB, as
* it is permanently mapped using P1. So clear the
* entire pgd. */
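
Note: with this change the early boot order becomes lmb_init() ->
sh_mv.mv_mem_init() (generic_mem_init() above by default) ->
early_reserve_mem() -> lmb_enforce_memory_limit()/lmb_analyze() ->
do_init_bootmem(). A board with discontiguous RAM would register its banks by
overriding the machine-vector hook; a hypothetical sketch (base addresses and
sizes are made up):

	#include <linux/init.h>
	#include <linux/lmb.h>
	#include <asm/sizes.h>

	/* Hypothetical two-bank board: feed both ranges to LMB before
	 * early_reserve_mem() carves out the kernel and the zero page. */
	static void __init myboard_mem_init(void)
	{
		lmb_add(0x08000000, SZ_64M);	/* bank 0 */
		lmb_add(0x0c000000, SZ_64M);	/* bank 1 */
	}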
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 961b340..a2e645f 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -24,44 +24,6 @@ EXPORT_SYMBOL_GPL(node_data);
* latency. Each node's pgdat is node-local at the beginning of the node,
* immediately followed by the node mem map.
*/
-void __init setup_memory(void)
-{
- unsigned long free_pfn = PFN_UP(__pa(_end));
- u64 base = min_low_pfn << PAGE_SHIFT;
- u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
- lmb_add(base, size);
-
- /* Reserve the LMB regions used by the kernel, initrd, etc.. */
- lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
- /*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- */
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
- lmb_analyze();
- lmb_dump_all();
-
- /*
- * Node 0 sets up its pgdat at the first available pfn,
- * and bumps it up before setting up the bootmem allocator.
- */
- NODE_DATA(0) = pfn_to_kaddr(free_pfn);
- memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
- free_pfn += PFN_UP(sizeof(struct pglist_data));
- NODE_DATA(0)->bdata = &bootmem_node_data[0];
-
- /* Set up node 0 */
- setup_bootmem_allocator(free_pfn);
-
- /* Give the platforms a chance to hook up their nodes */
- plat_mem_setup();
-}
-
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
unsigned long bootmap_pages;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index e43ec60..18623ba 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -341,6 +341,8 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
unsigned long flags, pmb_flags;
int i, mapped;
+ if (size < SZ_16M)
+ return -EINVAL;
if (!pmb_addr_valid(vaddr, size))
return -EFAULT;
if (pmb_mapping_exists(vaddr, phys, size))
@@ -680,7 +682,7 @@ static void __init pmb_merge(struct pmb_entry *head)
/*
* The merged page size must be valid.
*/
- if (!pmb_size_valid(newsize))
+ if (!depth || !pmb_size_valid(newsize))
return;
head->flags &= ~PMB_SZ_MASK;
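
Note: the new SZ_16M floor reflects the smallest page a PMB entry can map;
the hardware only supports 16MB, 64MB, 128MB and 512MB entries. A sketch of
the kind of check pmb_size_valid() performs (helper and table names here are
illustrative, not the file's actual internals):

	static const unsigned long pmb_entry_sizes[] = {
		SZ_16M, SZ_64M, SZ_128M, SZ_512M,
	};

	static int pmb_size_ok(unsigned long size)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(pmb_entry_sizes); i++)
			if (pmb_entry_sizes[i] == size)
				return 1;

		return 0;
	}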
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
new file mode 100644
index 0000000..229bf75
--- /dev/null
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -0,0 +1,165 @@
+/*
+ * arch/sh/mm/tlb-debugfs.c
+ *
+ * debugfs ops for SH-4 ITLB/UTLBs.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+enum tlb_type {
+ TLB_TYPE_ITLB,
+ TLB_TYPE_UTLB,
+};
+
+static struct {
+ int bits;
+ const char *size;
+} tlb_sizes[] = {
+ { 0x0, " 1KB" },
+ { 0x1, " 4KB" },
+ { 0x2, " 8KB" },
+ { 0x4, " 64KB" },
+ { 0x5, "256KB" },
+ { 0x7, " 1MB" },
+ { 0x8, " 4MB" },
+ { 0xc, " 64MB" },
+};
+
+static int tlb_seq_show(struct seq_file *file, void *iter)
+{
+ unsigned int tlb_type = (unsigned int)file->private;
+ unsigned long addr1, addr2, data1, data2;
+ unsigned long flags;
+ unsigned long mmucr;
+ unsigned int nentries, entry;
+ unsigned int urb;
+
+ mmucr = __raw_readl(MMUCR);
+ if ((mmucr & 0x1) == 0) {
+ seq_printf(file, "address translation disabled\n");
+ return 0;
+ }
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
+
+ /* Make the "entry >= urb" test fail. */
+ if (urb == 0)
+ urb = MMUCR_URB_NENTRIES + 1;
+
+ seq_printf(file, "entry: vpn ppn asid size valid wired\n");
+
+ for (entry = 0; entry < nentries; entry++) {
+ unsigned long vpn, ppn, asid, size;
+ unsigned long valid;
+ unsigned long val;
+ const char *sz = " ?";
+ int i;
+
+ val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ vpn = val & 0xfffffc00;
+ valid = val & 0x100;
+
+ val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ asid = val & MMU_CONTEXT_ASID_MASK;
+
+ val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ ppn = (val & 0x0ffffc00) << 4;
+
+ val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ size = (val & 0xf0) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
+ if (tlb_sizes[i].bits == size)
+ break;
+ }
+
+ if (i != ARRAY_SIZE(tlb_sizes))
+ sz = tlb_sizes[i].size;
+
+ seq_printf(file, "%2d: 0x%08lx 0x%08lx %5lu %s %s %s\n",
+ entry, vpn, ppn, asid,
+ sz, valid ? "V" : "-",
+ (urb <= entry) ? "W" : "-");
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int tlb_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tlb_seq_show, inode->i_private);
+}
+
+static const struct file_operations tlb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = tlb_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tlb_debugfs_init(void)
+{
+ struct dentry *itlb, *utlb;
+
+ itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+ (unsigned int *)TLB_TYPE_ITLB,
+ &tlb_debugfs_fops);
+ if (unlikely(!itlb))
+ return -ENOMEM;
+ if (IS_ERR(itlb))
+ return PTR_ERR(itlb);
+
+ utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+ (unsigned int *)TLB_TYPE_UTLB,
+ &tlb_debugfs_fops);
+ if (unlikely(!utlb)) {
+ debugfs_remove(itlb);
+ return -ENOMEM;
+ }
+
+ if (IS_ERR(utlb)) {
+ debugfs_remove(itlb);
+ return PTR_ERR(utlb);
+ }
+
+ return 0;
+}
+module_init(tlb_debugfs_init);
+
+MODULE_LICENSE("GPL v2");
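
Note: both files use the single_open() idiom, where the show() callback emits
the whole dump in one pass and inode->i_private is forwarded as the seq_file's
private cookie (here the TLB type). The generic shape, with hypothetical
names:

	static int demo_show(struct seq_file *m, void *unused)
	{
		/* m->private carries the cookie passed to single_open(). */
		seq_printf(m, "cookie=%lu\n", (unsigned long)m->private);
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, inode->i_private);
	}

	static const struct file_operations demo_fops = {
		.owner   = THIS_MODULE,
		.open    = demo_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

With debugfs mounted, the dumps appear under the arch directory, e.g.
/sys/kernel/debug/sh/itlb and /sys/kernel/debug/sh/utlb.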
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 706da1d..03db41c 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -189,7 +189,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -294,22 +293,11 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- if (is_global_init(current)) {
- panic("INIT out of memory\n");
- yield();
- goto survive;
- }
- printk("fault:Out of memory\n");
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
printk("fault:Do sigbus\n");