author		Haavard Skinnemoen <hskinnemoen@atmel.com>	2006-09-26 06:32:13 (GMT)
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 15:48:54 (GMT)
commit		5f97f7f9400de47ae837170bb274e90ad3934386 (patch)
tree		514451e6dc6b46253293a00035d375e77b1c65ed	/arch/avr32/mm/tlb.c
parent		53e62d3aaa60590d4a69b4e07c29f448b5151047 (diff)
download	linux-fsl-qoriq-5f97f7f9400de47ae837170bb274e90ad3934386.tar.xz
[PATCH] avr32 architecture
This adds support for the Atmel AVR32 architecture as well as the AT32AP7000 CPU and the AT32STK1000 development board.

AVR32 is a new high-performance 32-bit RISC microprocessor core, designed for cost-sensitive embedded applications, with particular emphasis on low power consumption and high code density. The AVR32 architecture is not binary compatible with earlier 8-bit AVR architectures.

The AVR32 architecture, including the instruction set, is described by the AVR32 Architecture Manual, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32000.pdf

The Atmel AT32AP7000 is the first CPU implementing the AVR32 architecture. It features a 7-stage pipeline, 16KB instruction and data caches and a full Memory Management Unit. It also comes with a large set of integrated peripherals, many of which are shared with the AT91 ARM-based controllers from Atmel.

Full data sheet is available from

http://www.atmel.com/dyn/resources/prod_documents/doc32003.pdf

while the CPU core implementation including caches and MMU is documented by the AVR32 AP Technical Reference, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32001.pdf

Information about the AT32STK1000 development board can be found at

http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3918

including a BSP CD image with an earlier version of this patch, development tools (binaries and source/patches) and a root filesystem image suitable for booting from SD card.

Alternatively, there's a preliminary "getting started" guide available at

http://avr32linux.org/twiki/bin/view/Main/GettingStarted

which provides links to the sources and patches you will need in order to set up a cross-compiling environment for avr32-linux.

This patch, as well as the other patches included with the BSP and the toolchain patches, is actively supported by Atmel Corporation.

[dmccr@us.ibm.com: Fix more pxx_page macro locations]
[bunk@stusta.de: fix `make defconfig']
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Dave McCracken <dmccr@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/avr32/mm/tlb.c')
-rw-r--r--  arch/avr32/mm/tlb.c  378
1 file changed, 378 insertions(+), 0 deletions(-)
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
new file mode 100644
index 0000000..5d0523b
--- /dev/null
+++ b/arch/avr32/mm/tlb.c
@@ -0,0 +1,378 @@
+/*
+ * AVR32 TLB operations
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+
+#include <asm/mmu_context.h>
+
+#define _TLBEHI_I 0x100
+
+void show_dtlb_entry(unsigned int index)
+{
+ unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mmucr_save = sysreg_read(MMUCR);
+ tlbehi_save = sysreg_read(TLBEHI);
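+ /*
+  * Keep only the low MMUCR control bits (mask 0x13) and put the entry
+  * index into the field at bit 14 (the same replacement pointer field
+  * that set_replacement_pointer() programs), so that the "tlbr" below
+  * loads this entry into TLBEHI/TLBELO.
+  */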
+ mmucr = mmucr_save & 0x13;
+ mmucr |= index << 14;
+ sysreg_write(MMUCR, mmucr);
+
+ asm volatile("tlbr" : : : "memory");
+ cpu_sync_pipeline();
+
+ tlbehi = sysreg_read(TLBEHI);
+ tlbelo = sysreg_read(TLBELO);
+
+ printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
+ index,
+ (tlbehi & 0x200)?'1':'0',
+ (tlbelo & 0x100)?'1':'0',
+ (tlbehi & 0xff),
+ (tlbehi >> 12), (tlbelo >> 12),
+ (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
+ (tlbelo & 0x200)?'1':'0',
+ (tlbelo & 0x080)?'1':'0',
+ (tlbelo & 0x001)?'1':'0',
+ (tlbelo & 0x002)?'1':'0');
+
+ sysreg_write(MMUCR, mmucr_save);
+ sysreg_write(TLBEHI, tlbehi_save);
+ cpu_sync_pipeline();
+ local_irq_restore(flags);
+}
+
+void dump_dtlb(void)
+{
+ unsigned int i;
+
+ printk("ID V G ASID VPN PFN AP SZ C B W D\n");
+ for (i = 0; i < 32; i++)
+ show_dtlb_entry(i);
+}
+
+static unsigned long last_mmucr;
+
+static inline void set_replacement_pointer(unsigned shift)
+{
+ unsigned long mmucr, mmucr_save;
+
+ mmucr = mmucr_save = sysreg_read(MMUCR);
+
+ /* Does this mapping already exist? */
+ __asm__ __volatile__(
+ " tlbs\n"
+ " mfsr %0, %1"
+ : "=r"(mmucr)
+ : "i"(SYSREG_MMUCR));
+
+ if (mmucr & SYSREG_BIT(MMUCR_N)) {
+ /* Not found -- pick a not-recently-accessed entry */
+ unsigned long rp;
+ unsigned long tlbar = sysreg_read(TLBARLO);
+
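+ /*
+  * TLBARLO holds one flag per TLB entry, entry 0 in the most
+  * significant bit; a set bit means the entry has not been accessed
+  * recently (__flush_tlb_page() below sets it the same way).  Pick the
+  * lowest-numbered such entry, or reset the whole bitmap and reuse
+  * entry 0 if every entry has been accessed.
+  */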
+ rp = 32 - fls(tlbar);
+ if (rp == 32) {
+ rp = 0;
+ sysreg_write(TLBARLO, -1L);
+ }
+
+ mmucr &= 0x13;
+ mmucr |= (rp << shift);
+
+ sysreg_write(MMUCR, mmucr);
+ }
+
+ last_mmucr = mmucr;
+}
+
+static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
+{
+ unsigned long vpn;
+
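+ /*
+  * Load the virtual page number, valid bit and ASID into TLBEHI, let
+  * set_replacement_pointer() pick which TLB entry to overwrite, then
+  * write the hardware PTE bits to TLBELO and commit the entry with
+  * "tlbw".
+  */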
+ vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
+ sysreg_write(TLBEHI, vpn);
+ cpu_sync_pipeline();
+
+ set_replacement_pointer(14);
+
+ sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
+
+ /* Let's go */
+ asm volatile("nop\n\ttlbw" : : : "memory");
+ cpu_sync_pipeline();
+}
+
+void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+
+ /* ptrace may call this routine */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+ update_dtlb(address, pte, get_asid());
+ local_irq_restore(flags);
+}
+
+void __flush_tlb_page(unsigned long asid, unsigned long page)
+{
+ unsigned long mmucr, tlbehi;
+
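+ /*
+  * Look the page up with "tlbs".  If it is in the TLB, write the entry
+  * back with the valid bit cleared and flag it as not recently
+  * accessed, so it is the first candidate for replacement.
+  */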
+ page |= asid;
+ sysreg_write(TLBEHI, page);
+ cpu_sync_pipeline();
+ asm volatile("tlbs");
+ mmucr = sysreg_read(MMUCR);
+
+ if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
+ unsigned long tlbarlo;
+ unsigned long entry;
+
+ /* Clear the "valid" bit */
+ tlbehi = sysreg_read(TLBEHI);
+ tlbehi &= ~_TLBEHI_VALID;
+ sysreg_write(TLBEHI, tlbehi);
+ cpu_sync_pipeline();
+
+ /* mark the entry as "not accessed" */
+ entry = (mmucr >> 14) & 0x3f;
+ tlbarlo = sysreg_read(TLBARLO);
+ tlbarlo |= (0x80000000 >> entry);
+ sysreg_write(TLBARLO, tlbarlo);
+
+ /* update the entry with valid bit clear */
+ asm volatile("tlbw");
+ cpu_sync_pipeline();
+ }
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+ unsigned long flags, asid;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+ page &= PAGE_MASK;
+
+ local_irq_save(flags);
+ if (vma->vm_mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+
+ __flush_tlb_page(asid, page);
+
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ } else {
+ unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ if (mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+
+ while (start < end) {
+ __flush_tlb_page(asid, start);
+ start += PAGE_SIZE;
+ }
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+/*
+ * TODO: If this is only called for addresses > TASK_SIZE, we can probably
+ * skip the ASID stuff and just use the Global bit...
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
+ flush_tlb_all();
+ } else {
+ unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
+ unsigned long saved_asid = get_asid();
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ set_asid(asid);
+ while (start < end) {
+ __flush_tlb_page(asid, start);
+ start += PAGE_SIZE;
+ }
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ /* Invalidate all TLB entries of this process by getting a new ASID */
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
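+ /* Setting the invalidate bit in MMUCR flushes all TLB entries at once */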
+ sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+
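+/*
+ * /proc/tlb dumps all 32 DTLB entries through the seq_file iterator
+ * interface: tlb_start()/tlb_next() step an index from 0 to 31, and
+ * tlb_show() prints one entry per line in the same format as
+ * dump_dtlb() above.
+ */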
+static void *tlb_start(struct seq_file *tlb, loff_t *pos)
+{
+ static unsigned long tlb_index;
+
+ if (*pos >= 32)
+ return NULL;
+
+ tlb_index = 0;
+ return &tlb_index;
+}
+
+static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
+{
+ unsigned long *index = v;
+
+ if (*index >= 31)
+ return NULL;
+
+ ++*pos;
+ ++*index;
+ return index;
+}
+
+static void tlb_stop(struct seq_file *tlb, void *v)
+{
+
+}
+
+static int tlb_show(struct seq_file *tlb, void *v)
+{
+ unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+ unsigned long flags;
+ unsigned long *index = v;
+
+ if (*index == 0)
+ seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");
+
+ BUG_ON(*index >= 32);
+
+ local_irq_save(flags);
+ mmucr_save = sysreg_read(MMUCR);
+ tlbehi_save = sysreg_read(TLBEHI);
+ mmucr = mmucr_save & 0x13;
+ mmucr |= *index << 14;
+ sysreg_write(MMUCR, mmucr);
+
+ asm volatile("tlbr" : : : "memory");
+ cpu_sync_pipeline();
+
+ tlbehi = sysreg_read(TLBEHI);
+ tlbelo = sysreg_read(TLBELO);
+
+ sysreg_write(MMUCR, mmucr_save);
+ sysreg_write(TLBEHI, tlbehi_save);
+ cpu_sync_pipeline();
+ local_irq_restore(flags);
+
+ seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
+ *index,
+ (tlbehi & 0x200)?'1':'0',
+ (tlbelo & 0x100)?'1':'0',
+ (tlbehi & 0xff),
+ (tlbehi >> 12), (tlbelo >> 12),
+ (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
+ (tlbelo & 0x200)?'1':'0',
+ (tlbelo & 0x080)?'1':'0',
+ (tlbelo & 0x001)?'1':'0',
+ (tlbelo & 0x002)?'1':'0');
+
+ return 0;
+}
+
+static struct seq_operations tlb_ops = {
+ .start = tlb_start,
+ .next = tlb_next,
+ .stop = tlb_stop,
+ .show = tlb_show,
+};
+
+static int tlb_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &tlb_ops);
+}
+
+static struct file_operations proc_tlb_operations = {
+ .open = tlb_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init proctlb_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = create_proc_entry("tlb", 0, NULL);
+ if (entry)
+ entry->proc_fops = &proc_tlb_operations;
+ return 0;
+}
+late_initcall(proctlb_init);
+#endif /* CONFIG_PROC_FS */