Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile      |   2
-rw-r--r--  arch/mips/mm/c-r3k.c       |  12
-rw-r--r--  arch/mips/mm/c-r4k.c       | 116
-rw-r--r--  arch/mips/mm/c-sb1.c       | 535
-rw-r--r--  arch/mips/mm/c-tx39.c      |   6
-rw-r--r--  arch/mips/mm/cache.c       |   9
-rw-r--r--  arch/mips/mm/cerr-sb1.c    |  24
-rw-r--r--  arch/mips/mm/dma-default.c |   4
-rw-r--r--  arch/mips/mm/pg-r4k.c      |  22
-rw-r--r--  arch/mips/mm/pg-sb1.c      |  12
-rw-r--r--  arch/mips/mm/pgtable.c     |   8
-rw-r--r--  arch/mips/mm/sc-mips.c     |   2
-rw-r--r--  arch/mips/mm/tlb-r4k.c     |   2
-rw-r--r--  arch/mips/mm/tlb-r8k.c     |   2
-rw-r--r--  arch/mips/mm/tlbex.c       | 210
15 files changed, 230 insertions, 736 deletions
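The largest behavioural change below is in c-r4k.c: local_r4k_flush_cache_page() no longer falls back to indexed cache flushes for pages owned by a different ASID. Instead it maps the page through kmap_coherent() (or plain kmap_atomic() when the D-cache has no aliases) and flushes through that temporary mapping. A minimal sketch of the pattern, assuming kernel context; the wrapper name is hypothetical, every other identifier is taken from the hunk itself:

#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Illustration only: flush one page's D-cache lines even when the page
 * belongs to an mm other than the current one, by flushing through a
 * temporary kernel mapping instead of guessing cache indexes.
 */
static void flush_dcache_page_other_mm(struct page *page, unsigned long addr)
{
	void *vaddr;

	if (cpu_has_dc_aliases)
		vaddr = kmap_coherent(page, addr);	/* alias-safe window */
	else
		vaddr = kmap_atomic(page, KM_USER0);	/* any mapping will do */

	r4k_blast_dcache_page((unsigned long)vaddr);

	if (cpu_has_dc_aliases)
		kunmap_coherent();
	else
		kunmap_atomic(vaddr, KM_USER0);
}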
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 43e4810..32fd5db 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -22,7 +22,7 @@ obj-$(CONFIG_CPU_R5432) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r8k.o obj-$(CONFIG_CPU_RM7000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o obj-$(CONFIG_CPU_RM9000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o -obj-$(CONFIG_CPU_SB1) += c-sb1.o cerr-sb1.o cex-sb1.o pg-sb1.o \ +obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o pg-sb1.o \ tlb-r4k.o obj-$(CONFIG_CPU_TX39XX) += c-tx39.o pg-r4k.o tlb-r3k.o obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 59868a1..c55312f 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -121,7 +121,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end) write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC); for (i = 0; i < size; i += 0x080) { - asm ( "sb\t$0, 0x000(%0)\n\t" + asm( "sb\t$0, 0x000(%0)\n\t" "sb\t$0, 0x004(%0)\n\t" "sb\t$0, 0x008(%0)\n\t" "sb\t$0, 0x00c(%0)\n\t" @@ -178,7 +178,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end) write_c0_status((ST0_ISC|flags)&~ST0_IEC); for (i = 0; i < size; i += 0x080) { - asm ( "sb\t$0, 0x000(%0)\n\t" + asm( "sb\t$0, 0x000(%0)\n\t" "sb\t$0, 0x004(%0)\n\t" "sb\t$0, 0x008(%0)\n\t" "sb\t$0, 0x00c(%0)\n\t" @@ -217,8 +217,8 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end) write_c0_status(flags); } -static inline unsigned long get_phys_page (unsigned long addr, - struct mm_struct *mm) +static inline unsigned long get_phys_page(unsigned long addr, + struct mm_struct *mm) { pgd_t *pgd; pud_t *pud; @@ -281,13 +281,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr) write_c0_status(flags&~ST0_IEC); /* Fill the TLB to avoid an exception with caches isolated. */ - asm ( "lw\t$0, 0x000(%0)\n\t" + asm( "lw\t$0, 0x000(%0)\n\t" "lw\t$0, 0x004(%0)\n\t" : : "r" (addr) ); write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC); - asm ( "sb\t$0, 0x000(%0)\n\t" + asm( "sb\t$0, 0x000(%0)\n\t" "sb\t$0, 0x004(%0)\n\t" : : "r" (addr) ); diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index bad5719..971f6c0 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -8,7 +8,9 @@ * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/init.h> +#include <linux/highmem.h> #include <linux/kernel.h> +#include <linux/linkage.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/bitops.h> @@ -162,12 +164,12 @@ static inline void tx49_blast_icache32(void) /* I'm in even chunk. blast odd chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start + 0x400; addr < end; addr += 0x400 * 2) - cache32_unroll32(addr|ws,Index_Invalidate_I); + cache32_unroll32(addr|ws, Index_Invalidate_I); CACHE32_UNROLL32_ALIGN; /* I'm in odd chunk. blast even chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 0x400 * 2) - cache32_unroll32(addr|ws,Index_Invalidate_I); + cache32_unroll32(addr|ws, Index_Invalidate_I); } static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page) @@ -193,12 +195,12 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page) /* I'm in even chunk. 
blast odd chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start + 0x400; addr < end; addr += 0x400 * 2) - cache32_unroll32(addr|ws,Index_Invalidate_I); + cache32_unroll32(addr|ws, Index_Invalidate_I); CACHE32_UNROLL32_ALIGN; /* I'm in odd chunk. blast even chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 0x400 * 2) - cache32_unroll32(addr|ws,Index_Invalidate_I); + cache32_unroll32(addr|ws, Index_Invalidate_I); } static void (* r4k_blast_icache_page)(unsigned long addr); @@ -317,23 +319,6 @@ static void __init r4k_blast_scache_setup(void) r4k_blast_scache = blast_scache128; } -/* - * This is former mm's flush_cache_all() which really should be - * flush_cache_vunmap these days ... - */ -static inline void local_r4k_flush_cache_all(void * args) -{ - r4k_blast_dcache(); -} - -static void r4k_flush_cache_all(void) -{ - if (!cpu_has_dc_aliases) - return; - - r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1); -} - static inline void local_r4k___flush_cache_all(void * args) { #if defined(CONFIG_CPU_LOONGSON2) @@ -343,7 +328,7 @@ static inline void local_r4k___flush_cache_all(void * args) r4k_blast_dcache(); r4k_blast_icache(); - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: @@ -392,10 +377,10 @@ static inline void local_r4k_flush_cache_mm(void * args) * R4000SC and R4400SC indexed S-cache ops also invalidate primary * caches, so we can bail out early. */ - if (current_cpu_data.cputype == CPU_R4000SC || - current_cpu_data.cputype == CPU_R4000MC || - current_cpu_data.cputype == CPU_R4400SC || - current_cpu_data.cputype == CPU_R4400MC) { + if (current_cpu_type() == CPU_R4000SC || + current_cpu_type() == CPU_R4000MC || + current_cpu_type() == CPU_R4400SC || + current_cpu_type() == CPU_R4400MC) { r4k_blast_scache(); return; } @@ -422,13 +407,14 @@ static inline void local_r4k_flush_cache_page(void *args) struct flush_cache_page_args *fcp_args = args; struct vm_area_struct *vma = fcp_args->vma; unsigned long addr = fcp_args->addr; - unsigned long paddr = fcp_args->pfn << PAGE_SHIFT; + struct page *page = pfn_to_page(fcp_args->pfn); int exec = vma->vm_flags & VM_EXEC; struct mm_struct *mm = vma->vm_mm; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; + void *vaddr; /* * If ownes no valid ASID yet, cannot possibly have gotten @@ -450,43 +436,40 @@ static inline void local_r4k_flush_cache_page(void *args) if (!(pte_val(*ptep) & _PAGE_PRESENT)) return; - /* - * Doing flushes for another ASID than the current one is - * too difficult since stupid R4k caches do a TLB translation - * for every cache flush operation. So we do indexed flushes - * in that case, which doesn't overly flush the cache too much. - */ - if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { - if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { - r4k_blast_dcache_page(addr); - if (exec && !cpu_icache_snoops_remote_store) - r4k_blast_scache_page(addr); - } - if (exec) - r4k_blast_icache_page(addr); - - return; + if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) + vaddr = NULL; + else { + /* + * Use kmap_coherent or kmap_atomic to do flushes for + * another ASID than the current one. + */ + if (cpu_has_dc_aliases) + vaddr = kmap_coherent(page, addr); + else + vaddr = kmap_atomic(page, KM_USER0); + addr = (unsigned long)vaddr; } - /* - * Do indexed flush, too much work to get the (possible) TLB refills - * to work correctly. 
- */ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { - r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ? - paddr : addr); - if (exec && !cpu_icache_snoops_remote_store) { - r4k_blast_scache_page_indexed(paddr); - } + r4k_blast_dcache_page(addr); + if (exec && !cpu_icache_snoops_remote_store) + r4k_blast_scache_page(addr); } if (exec) { - if (cpu_has_vtag_icache && mm == current->active_mm) { + if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) drop_mmu_context(mm, cpu); } else - r4k_blast_icache_page_indexed(addr); + r4k_blast_icache_page(addr); + } + + if (vaddr) { + if (cpu_has_dc_aliases) + kunmap_coherent(); + else + kunmap_atomic(vaddr, KM_USER0); } } @@ -948,12 +931,16 @@ static void __init probe_pcache(void) switch (c->cputype) { case CPU_20KC: case CPU_25KF: + case CPU_SB1: + case CPU_SB1A: c->dcache.flags |= MIPS_CACHE_PINDEX; + break; + case CPU_R10000: case CPU_R12000: case CPU_R14000: - case CPU_SB1: break; + case CPU_24K: case CPU_34K: case CPU_74K: @@ -1210,7 +1197,7 @@ static void __init coherency_setup(void) * this bit and; some wire it to zero, others like Toshiba had the * silly idea of putting something else there ... */ - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_R4000PC: case CPU_R4000SC: case CPU_R4000MC: @@ -1235,11 +1222,20 @@ void __init r4k_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); - extern char except_vec2_generic; + extern char __weak except_vec2_generic; + extern char __weak except_vec2_sb1; struct cpuinfo_mips *c = &current_cpu_data; - /* Default cache error handler for R4000 and R5000 family */ - set_uncached_handler (0x100, &except_vec2_generic, 0x80); + switch (c->cputype) { + case CPU_SB1: + case CPU_SB1A: + set_uncached_handler(0x100, &except_vec2_sb1, 0x80); + break; + + default: + set_uncached_handler(0x100, &except_vec2_generic, 0x80); + break; + } probe_pcache(); setup_scache(); @@ -1265,7 +1261,7 @@ void __init r4k_cache_init(void) PAGE_SIZE - 1); else shm_align_mask = PAGE_SIZE-1; - flush_cache_all = r4k_flush_cache_all; + flush_cache_all = cache_noop; __flush_cache_all = r4k___flush_cache_all; flush_cache_mm = r4k_flush_cache_mm; flush_cache_page = r4k_flush_cache_page; diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c deleted file mode 100644 index 85ce284..0000000 --- a/arch/mips/mm/c-sb1.c +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) - * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org) - * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation - * Copyright (C) 2004 Maciej W. Rozycki - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */ -#include <linux/init.h> -#include <linux/hardirq.h> - -#include <asm/asm.h> -#include <asm/bootinfo.h> -#include <asm/cacheops.h> -#include <asm/cpu.h> -#include <asm/mipsregs.h> -#include <asm/mmu_context.h> -#include <asm/uaccess.h> - -extern void sb1_dma_init(void); - -/* These are probed at ld_mmu time */ -static unsigned long icache_size; -static unsigned long dcache_size; - -static unsigned short icache_line_size; -static unsigned short dcache_line_size; - -static unsigned int icache_index_mask; -static unsigned int dcache_index_mask; - -static unsigned short icache_assoc; -static unsigned short dcache_assoc; - -static unsigned short icache_sets; -static unsigned short dcache_sets; - -static unsigned int icache_range_cutoff; -static unsigned int dcache_range_cutoff; - -static inline void sb1_on_each_cpu(void (*func) (void *info), void *info, - int retry, int wait) -{ - preempt_disable(); - smp_call_function(func, info, retry, wait); - func(info); - preempt_enable(); -} - -/* - * The dcache is fully coherent to the system, with one - * big caveat: the instruction stream. In other words, - * if we miss in the icache, and have dirty data in the - * L1 dcache, then we'll go out to memory (or the L2) and - * get the not-as-recent data. - * - * So the only time we have to flush the dcache is when - * we're flushing the icache. Since the L2 is fully - * coherent to everything, including I/O, we never have - * to flush it - */ - -#define cache_set_op(op, addr) \ - __asm__ __volatile__( \ - " .set noreorder \n" \ - " .set mips64\n\t \n" \ - " cache %0, (0<<13)(%1) \n" \ - " cache %0, (1<<13)(%1) \n" \ - " cache %0, (2<<13)(%1) \n" \ - " cache %0, (3<<13)(%1) \n" \ - " .set mips0 \n" \ - " .set reorder" \ - : \ - : "i" (op), "r" (addr)) - -#define sync() \ - __asm__ __volatile( \ - " .set mips64\n\t \n" \ - " sync \n" \ - " .set mips0") - -#define mispredict() \ - __asm__ __volatile__( \ - " bnezl $0, 1f \n" /* Force mispredict */ \ - "1: \n"); - -/* - * Writeback and invalidate the entire dcache - */ -static inline void __sb1_writeback_inv_dcache_all(void) -{ - unsigned long addr = 0; - - while (addr < dcache_line_size * dcache_sets) { - cache_set_op(Index_Writeback_Inv_D, addr); - addr += dcache_line_size; - } -} - -/* - * Writeback and invalidate a range of the dcache. The addresses are - * virtual, and since we're using index ops and bit 12 is part of both - * the virtual frame and physical index, we have to clear both sets - * (bit 12 set and cleared). - */ -static inline void __sb1_writeback_inv_dcache_range(unsigned long start, - unsigned long end) -{ - unsigned long index; - - start &= ~(dcache_line_size - 1); - end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1); - - while (start != end) { - index = start & dcache_index_mask; - cache_set_op(Index_Writeback_Inv_D, index); - cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12)); - start += dcache_line_size; - } - sync(); -} - -/* - * Writeback and invalidate a range of the dcache. With physical - * addresseses, we don't have to worry about possible bit 12 aliasing. - * XXXKW is it worth turning on KX and using hit ops with xkphys? 
- */ -static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start, - unsigned long end) -{ - start &= ~(dcache_line_size - 1); - end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1); - - while (start != end) { - cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask); - start += dcache_line_size; - } - sync(); -} - - -/* - * Invalidate the entire icache - */ -static inline void __sb1_flush_icache_all(void) -{ - unsigned long addr = 0; - - while (addr < icache_line_size * icache_sets) { - cache_set_op(Index_Invalidate_I, addr); - addr += icache_line_size; - } -} - -/* - * Invalidate a range of the icache. The addresses are virtual, and - * the cache is virtually indexed and tagged. However, we don't - * necessarily have the right ASID context, so use index ops instead - * of hit ops. - */ -static inline void __sb1_flush_icache_range(unsigned long start, - unsigned long end) -{ - start &= ~(icache_line_size - 1); - end = (end + icache_line_size - 1) & ~(icache_line_size - 1); - - while (start != end) { - cache_set_op(Index_Invalidate_I, start & icache_index_mask); - start += icache_line_size; - } - mispredict(); - sync(); -} - -/* - * Flush the icache for a given physical page. Need to writeback the - * dcache first, then invalidate the icache. If the page isn't - * executable, nothing is required. - */ -static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) -{ - int cpu = smp_processor_id(); - -#ifndef CONFIG_SMP - if (!(vma->vm_flags & VM_EXEC)) - return; -#endif - - __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE); - - /* - * Bumping the ASID is probably cheaper than the flush ... - */ - if (vma->vm_mm == current->active_mm) { - if (cpu_context(cpu, vma->vm_mm) != 0) - drop_mmu_context(vma->vm_mm, cpu); - } else - __sb1_flush_icache_range(addr, addr + PAGE_SIZE); -} - -#ifdef CONFIG_SMP -struct flush_cache_page_args { - struct vm_area_struct *vma; - unsigned long addr; - unsigned long pfn; -}; - -static void sb1_flush_cache_page_ipi(void *info) -{ - struct flush_cache_page_args *args = info; - - local_sb1_flush_cache_page(args->vma, args->addr, args->pfn); -} - -/* Dirty dcache could be on another CPU, so do the IPIs */ -static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) -{ - struct flush_cache_page_args args; - - if (!(vma->vm_flags & VM_EXEC)) - return; - - addr &= PAGE_MASK; - args.vma = vma; - args.addr = addr; - args.pfn = pfn; - sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1); -} -#else -void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) - __attribute__((alias("local_sb1_flush_cache_page"))); -#endif - -#ifdef CONFIG_SMP -static void sb1_flush_cache_data_page_ipi(void *info) -{ - unsigned long start = (unsigned long)info; - - __sb1_writeback_inv_dcache_range(start, start + PAGE_SIZE); -} - -static void sb1_flush_cache_data_page(unsigned long addr) -{ - if (in_atomic()) - __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE); - else - on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1); -} -#else - -static void local_sb1_flush_cache_data_page(unsigned long addr) -{ - __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE); -} - -void sb1_flush_cache_data_page(unsigned long) - __attribute__((alias("local_sb1_flush_cache_data_page"))); -#endif - -/* - * Invalidate all caches on this CPU - */ -static void __used local_sb1___flush_cache_all(void) -{ - 
__sb1_writeback_inv_dcache_all(); - __sb1_flush_icache_all(); -} - -#ifdef CONFIG_SMP -void sb1___flush_cache_all_ipi(void *ignored) - __attribute__((alias("local_sb1___flush_cache_all"))); - -static void sb1___flush_cache_all(void) -{ - sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1); -} -#else -void sb1___flush_cache_all(void) - __attribute__((alias("local_sb1___flush_cache_all"))); -#endif - -/* - * When flushing a range in the icache, we have to first writeback - * the dcache for the same range, so new ifetches will see any - * data that was dirty in the dcache. - * - * The start/end arguments are Kseg addresses (possibly mapped Kseg). - */ - -static void local_sb1_flush_icache_range(unsigned long start, - unsigned long end) -{ - /* Just wb-inv the whole dcache if the range is big enough */ - if ((end - start) > dcache_range_cutoff) - __sb1_writeback_inv_dcache_all(); - else - __sb1_writeback_inv_dcache_range(start, end); - - /* Just flush the whole icache if the range is big enough */ - if ((end - start) > icache_range_cutoff) - __sb1_flush_icache_all(); - else - __sb1_flush_icache_range(start, end); -} - -#ifdef CONFIG_SMP -struct flush_icache_range_args { - unsigned long start; - unsigned long end; -}; - -static void sb1_flush_icache_range_ipi(void *info) -{ - struct flush_icache_range_args *args = info; - - local_sb1_flush_icache_range(args->start, args->end); -} - -void sb1_flush_icache_range(unsigned long start, unsigned long end) -{ - struct flush_icache_range_args args; - - args.start = start; - args.end = end; - sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1); -} -#else -void sb1_flush_icache_range(unsigned long start, unsigned long end) - __attribute__((alias("local_sb1_flush_icache_range"))); -#endif - -/* - * A signal trampoline must fit into a single cacheline. - */ -static void local_sb1_flush_cache_sigtramp(unsigned long addr) -{ - cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask); - cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask); - cache_set_op(Index_Invalidate_I, addr & icache_index_mask); - mispredict(); -} - -#ifdef CONFIG_SMP -static void sb1_flush_cache_sigtramp_ipi(void *info) -{ - unsigned long iaddr = (unsigned long) info; - local_sb1_flush_cache_sigtramp(iaddr); -} - -static void sb1_flush_cache_sigtramp(unsigned long addr) -{ - sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1); -} -#else -void sb1_flush_cache_sigtramp(unsigned long addr) - __attribute__((alias("local_sb1_flush_cache_sigtramp"))); -#endif - - -/* - * Anything that just flushes dcache state can be ignored, as we're always - * coherent in dcache space. This is just a dummy function that all the - * nop'ed routines point to - */ -static void sb1_nop(void) -{ -} - -/* - * Cache set values (from the mips64 spec) - * 0 - 64 - * 1 - 128 - * 2 - 256 - * 3 - 512 - * 4 - 1024 - * 5 - 2048 - * 6 - 4096 - * 7 - Reserved - */ - -static unsigned int decode_cache_sets(unsigned int config_field) -{ - if (config_field == 7) { - /* JDCXXX - Find a graceful way to abort. */ - return 0; - } - return (1<<(config_field + 6)); -} - -/* - * Cache line size values (from the mips64 spec) - * 0 - No cache present. - * 1 - 4 bytes - * 2 - 8 bytes - * 3 - 16 bytes - * 4 - 32 bytes - * 5 - 64 bytes - * 6 - 128 bytes - * 7 - Reserved - */ - -static unsigned int decode_cache_line_size(unsigned int config_field) -{ - if (config_field == 0) { - return 0; - } else if (config_field == 7) { - /* JDCXXX - Find a graceful way to abort. 
*/ - return 0; - } - return (1<<(config_field + 1)); -} - -/* - * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs) - * - * 24:22 Icache sets per way - * 21:19 Icache line size - * 18:16 Icache Associativity - * 15:13 Dcache sets per way - * 12:10 Dcache line size - * 9:7 Dcache Associativity - */ - -static char *way_string[] = { - "direct mapped", "2-way", "3-way", "4-way", - "5-way", "6-way", "7-way", "8-way", -}; - -static __init void probe_cache_sizes(void) -{ - u32 config1; - - config1 = read_c0_config1(); - icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7); - dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7); - icache_sets = decode_cache_sets((config1 >> 22) & 0x7); - dcache_sets = decode_cache_sets((config1 >> 13) & 0x7); - icache_assoc = ((config1 >> 16) & 0x7) + 1; - dcache_assoc = ((config1 >> 7) & 0x7) + 1; - icache_size = icache_line_size * icache_sets * icache_assoc; - dcache_size = dcache_line_size * dcache_sets * dcache_assoc; - /* Need to remove non-index bits for index ops */ - icache_index_mask = (icache_sets - 1) * icache_line_size; - dcache_index_mask = (dcache_sets - 1) * dcache_line_size; - /* - * These are for choosing range (index ops) versus all. - * icache flushes all ways for each set, so drop icache_assoc. - * dcache flushes all ways and each setting of bit 12 for each - * index, so drop dcache_assoc and halve the dcache_sets. - */ - icache_range_cutoff = icache_sets * icache_line_size; - dcache_range_cutoff = (dcache_sets / 2) * icache_line_size; - - printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n", - icache_size >> 10, way_string[icache_assoc - 1], - icache_line_size); - printk("Primary data cache %ldkB, %s, linesize %d bytes.\n", - dcache_size >> 10, way_string[dcache_assoc - 1], - dcache_line_size); -} - -/* - * This is called from cache.c. We have to set up all the - * memory management function pointers, as well as initialize - * the caches and tlbs - */ -void __init sb1_cache_init(void) -{ - extern char except_vec2_sb1; - - /* Special cache error handler for SB1 */ - set_uncached_handler (0x100, &except_vec2_sb1, 0x80); - - probe_cache_sizes(); - -#ifdef CONFIG_SIBYTE_DMA_PAGEOPS - sb1_dma_init(); -#endif - - /* - * None of these are needed for the SB1 - the Dcache is - * physically indexed and tagged, so no virtual aliasing can - * occur - */ - flush_cache_range = (void *) sb1_nop; - flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop; - flush_cache_all = sb1_nop; - - /* These routines are for Icache coherence with the Dcache */ - flush_icache_range = sb1_flush_icache_range; - flush_icache_all = __sb1_flush_icache_all; /* local only */ - - /* This implies an Icache flush too, so can't be nop'ed */ - flush_cache_page = sb1_flush_cache_page; - - flush_cache_sigtramp = sb1_flush_cache_sigtramp; - local_flush_data_cache_page = (void *) sb1_nop; - flush_data_cache_page = sb1_flush_cache_data_page; - - /* Full flush */ - __flush_cache_all = sb1___flush_cache_all; - - change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); - - /* - * This is the only way to force the update of K0 to complete - * before subsequent instruction fetch. 
- */ - __asm__ __volatile__( - ".set push \n" - " .set noat \n" - " .set noreorder \n" - " .set mips3 \n" - " " STR(PTR_LA) " $1, 1f \n" - " " STR(MTC0) " $1, $14 \n" - " eret \n" - "1: .set pop" - : - : - : "memory"); - - local_sb1___flush_cache_all(); -} diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index 560a6de..9ea121e 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c @@ -69,7 +69,7 @@ static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) /* TX39H2,TX39H3 */ static inline void tx39_blast_dcache_page(unsigned long addr) { - if (current_cpu_data.cputype != CPU_TX3912) + if (current_cpu_type() != CPU_TX3912) blast_dcache16_page(addr); } @@ -307,7 +307,7 @@ static __init void tx39_probe_cache(void) TX39_CONF_DCS_SHIFT)); current_cpu_data.icache.linesz = 16; - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_TX3912: current_cpu_data.icache.ways = 1; current_cpu_data.dcache.ways = 1; @@ -341,7 +341,7 @@ void __init tx39_cache_init(void) tx39_probe_cache(); - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_TX3912: /* TX39/H core (writethru direct-map cache) */ flush_cache_all = tx39h_flush_icache_all; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 81f925a..43dde87 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -3,13 +3,14 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2007 MIPS Technologies, Inc. */ #include <linux/fs.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/linkage.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/mm.h> @@ -157,12 +158,6 @@ void __init cpu_cache_init(void) tx39_cache_init(); return; } - if (cpu_has_sb1_cache) { - extern void __weak sb1_cache_init(void); - - sb1_cache_init(); - return; - } panic(cache_panic); } diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c index 4c72e65..e7f539e3 100644 --- a/arch/mips/mm/cerr-sb1.c +++ b/arch/mips/mm/cerr-sb1.c @@ -271,14 +271,22 @@ asmlinkage void sb1_cache_error(void) /* Parity lookup table. 
*/ static const uint8_t parity[256] = { - 0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0,1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1, - 1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1,0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0, - 1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1,0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0, - 0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0,1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1, - 1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1,0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0, - 0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0,1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1, - 0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0,1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1, - 1,0,0,1,0,1,1,0,0,1,1,0,1,0,0,1,0,1,1,0,1,0,0,1,1,0,0,1,0,1,1,0 + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 }; /* Masks to select bits for Hamming parity, mask_72_64[i] for bit[i] */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index f60b3dc..98b5e5b 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -35,8 +35,8 @@ static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr) static inline int cpu_is_noncoherent_r10000(struct device *dev) { return !plat_device_is_coherent(dev) && - (current_cpu_data.cputype == CPU_R10000 || - current_cpu_data.cputype == CPU_R12000); + (current_cpu_type() == CPU_R10000 || + current_cpu_type() == CPU_R12000); } void *dma_alloc_noncoherent(struct device *dev, size_t size, diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c index e47e9e9..4f770ac 100644 --- a/arch/mips/mm/pg-r4k.c +++ b/arch/mips/mm/pg-r4k.c @@ -347,13 +347,14 @@ void __init build_clear_page(void) { unsigned int loop_start; unsigned long off; + int i; epc = (unsigned int *) &clear_page_array; instruction_pending = 0; store_offset = 0; if (cpu_has_prefetch) { - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_TX49XX: /* TX49 supports only Pref_Load */ pref_offset_clear = 0; @@ -434,12 +435,22 @@ dest = label(); build_jr_ra(); BUG_ON(epc > clear_page_array + ARRAY_SIZE(clear_page_array)); + + pr_info("Synthesized clear page handler (%u instructions).\n", + (unsigned int)(epc - clear_page_array)); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (epc - clear_page_array); i++) + pr_debug("\t.word 0x%08x\n", clear_page_array[i]); + pr_debug("\t.set pop\n"); } void __init build_copy_page(void) { unsigned int loop_start; unsigned long off; + int i; epc = (unsigned int *) &copy_page_array; store_offset = load_offset = 0; @@ -515,4 +526,13 @@ dest = label(); build_jr_ra(); BUG_ON(epc > copy_page_array + ARRAY_SIZE(copy_page_array)); + + pr_info("Synthesized copy page handler (%u instructions).\n", + (unsigned int)(epc - copy_page_array)); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < (epc - copy_page_array); i++) + pr_debug("\t.word 0x%08x\n", copy_page_array[i]); + pr_debug("\t.set pop\n"); } diff --git
a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c index adb37d0..a3e98c2 100644 --- a/arch/mips/mm/pg-sb1.c +++ b/arch/mips/mm/pg-sb1.c @@ -188,9 +188,9 @@ static inline void copy_page_cpu(void *to, void *from) : "+r" (src), "+r" (dst) : "r" (end) #ifdef CONFIG_64BIT - : "$8","$9","$10","$11","memory"); + : "$8", "$9", "$10", "$11", "memory"); #else - : "$2","$3","$6","$7","$8","$9","$10","$11","memory"); + : "$2", "$3", "$6", "$7", "$8", "$9", "$10", "$11", "memory"); #endif } @@ -292,3 +292,11 @@ void copy_page(void *to, void *from) EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); + +void __init build_clear_page(void) +{ +} + +void __init build_copy_page(void) +{ +} diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index c93aa6c..57df1c3 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -29,9 +29,9 @@ void show_mem(void) shared += page_count(page) - 1; } printk("%d pages of RAM\n", total); - printk("%d pages of HIGHMEM\n",highmem); - printk("%d reserved pages\n",reserved); - printk("%d pages shared\n",shared); - printk("%d pages swap cached\n",cached); + printk("%d pages of HIGHMEM\n", highmem); + printk("%d reserved pages\n", reserved); + printk("%d pages shared\n", shared); + printk("%d pages swap cached\n", cached); #endif } diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 42b5096..c13170b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -102,7 +102,7 @@ static inline int __init mips_sc_probe(void) int __init mips_sc_init(void) { - int found = mips_sc_probe (); + int found = mips_sc_probe(); if (found) { mips_sc_enable(); bcops = &mips_sc_ops; diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index dcd6913..74ae034 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -491,7 +491,7 @@ void __init tlb_init(void) int wired = current_cpu_data.tlbsize - ntlb; write_c0_wired(wired); write_c0_index(wired-1); - printk ("Restricting TLB to %d entries\n", ntlb); + printk("Restricting TLB to %d entries\n", ntlb); } else printk("Ignoring invalid argument ntlb=%d\n", ntlb); } diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 266a47d..bd8409d 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -56,7 +56,7 @@ void local_flush_tlb_mm(struct mm_struct *mm) int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm,cpu); + drop_mmu_context(mm, cpu); } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 6c425b0..01b0961 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -35,24 +35,24 @@ #include <asm/smp.h> #include <asm/war.h> -static __init int __maybe_unused r45k_bvahwbug(void) +static inline int r45k_bvahwbug(void) { /* XXX: We should probe for the presence of this bug, but we don't. */ return 0; } -static __init int __maybe_unused r4k_250MHZhwbug(void) +static inline int r4k_250MHZhwbug(void) { /* XXX: We should probe for the presence of this bug, but we don't. */ return 0; } -static __init int __maybe_unused bcm1250_m3_war(void) +static inline int __maybe_unused bcm1250_m3_war(void) { return BCM1250_M3_WAR; } -static __init int __maybe_unused r10000_llsc_war(void) +static inline int __maybe_unused r10000_llsc_war(void) { return R10000_LLSC_WAR; } @@ -66,7 +66,7 @@ static __init int __maybe_unused r10000_llsc_war(void) * why; it's not an issue caused by the core RTL. 
* */ -static __init int __attribute__((unused)) m4kc_tlbp_war(void) +static int __init m4kc_tlbp_war(void) { return (current_cpu_data.processor_id & 0xffff00) == (PRID_COMP_MIPS | PRID_IMP_4KC); @@ -140,60 +140,60 @@ struct insn { | (e) << RE_SH \ | (f) << FUNC_SH) -static __initdata struct insn insn_table[] = { - { insn_addiu, M(addiu_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_addu, M(spec_op,0,0,0,0,addu_op), RS | RT | RD }, - { insn_and, M(spec_op,0,0,0,0,and_op), RS | RT | RD }, - { insn_andi, M(andi_op,0,0,0,0,0), RS | RT | UIMM }, - { insn_beq, M(beq_op,0,0,0,0,0), RS | RT | BIMM }, - { insn_beql, M(beql_op,0,0,0,0,0), RS | RT | BIMM }, - { insn_bgez, M(bcond_op,0,bgez_op,0,0,0), RS | BIMM }, - { insn_bgezl, M(bcond_op,0,bgezl_op,0,0,0), RS | BIMM }, - { insn_bltz, M(bcond_op,0,bltz_op,0,0,0), RS | BIMM }, - { insn_bltzl, M(bcond_op,0,bltzl_op,0,0,0), RS | BIMM }, - { insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM }, - { insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD }, - { insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET}, - { insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET}, - { insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE }, - { insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE }, - { insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE }, - { insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE }, - { insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE }, - { insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD }, - { insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 }, - { insn_j, M(j_op,0,0,0,0,0), JIMM }, - { insn_jal, M(jal_op,0,0,0,0,0), JIMM }, - { insn_jr, M(spec_op,0,0,0,0,jr_op), RS }, - { insn_ld, M(ld_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_ll, M(ll_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM }, - { insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET}, - { insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET}, - { insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM }, - { insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 }, - { insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_scd, M(scd_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_sd, M(sd_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_sll, M(spec_op,0,0,0,0,sll_op), RT | RD | RE }, - { insn_sra, M(spec_op,0,0,0,0,sra_op), RT | RD | RE }, - { insn_srl, M(spec_op,0,0,0,0,srl_op), RT | RD | RE }, - { insn_subu, M(spec_op,0,0,0,0,subu_op), RS | RT | RD }, - { insn_sw, M(sw_op,0,0,0,0,0), RS | RT | SIMM }, - { insn_tlbp, M(cop0_op,cop_op,0,0,0,tlbp_op), 0 }, - { insn_tlbwi, M(cop0_op,cop_op,0,0,0,tlbwi_op), 0 }, - { insn_tlbwr, M(cop0_op,cop_op,0,0,0,tlbwr_op), 0 }, - { insn_xor, M(spec_op,0,0,0,0,xor_op), RS | RT | RD }, - { insn_xori, M(xori_op,0,0,0,0,0), RS | RT | UIMM }, +static struct insn insn_table[] __initdata = { + { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, + { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, + { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, + { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, + { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, + { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 
0), RS | BIMM }, + { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, + { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, + { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, + { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, + { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, + { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, + { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, + { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, + { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, + { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, + { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, + { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, + { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, + { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, + { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, + { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, + { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, + { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, + { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, + { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_invalid, 0, 0 } }; #undef M -static __init u32 build_rs(u32 arg) +static u32 __init build_rs(u32 arg) { if (arg & ~RS_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -201,7 +201,7 @@ static __init u32 build_rs(u32 arg) return (arg & RS_MASK) << RS_SH; } -static __init u32 build_rt(u32 arg) +static u32 __init build_rt(u32 arg) { if (arg & ~RT_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -209,7 +209,7 @@ static __init u32 build_rt(u32 arg) return (arg & RT_MASK) << RT_SH; } -static __init u32 build_rd(u32 arg) +static u32 __init build_rd(u32 arg) { if (arg & ~RD_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -217,7 +217,7 @@ static __init u32 build_rd(u32 arg) return (arg & RD_MASK) << RD_SH; } -static __init u32 build_re(u32 arg) +static u32 __init build_re(u32 arg) { if (arg & ~RE_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -225,7 +225,7 @@ static __init u32 build_re(u32 arg) return (arg & RE_MASK) << RE_SH; } -static __init u32 build_simm(s32 arg) +static u32 __init build_simm(s32 arg) { if (arg > 0x7fff || arg < -0x8000) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -233,7 +233,7 @@ static __init u32 build_simm(s32 arg) return arg & 0xffff; } -static __init u32 build_uimm(u32 arg) +static u32 __init 
build_uimm(u32 arg) { if (arg & ~IMM_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -241,7 +241,7 @@ static __init u32 build_uimm(u32 arg) return arg & IMM_MASK; } -static __init u32 build_bimm(s32 arg) +static u32 __init build_bimm(s32 arg) { if (arg > 0x1ffff || arg < -0x20000) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -252,7 +252,7 @@ static __init u32 build_bimm(s32 arg) return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); } -static __init u32 build_jimm(u32 arg) +static u32 __init build_jimm(u32 arg) { if (arg & ~((JIMM_MASK) << 2)) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -260,7 +260,7 @@ static __init u32 build_jimm(u32 arg) return (arg >> 2) & JIMM_MASK; } -static __init u32 build_func(u32 arg) +static u32 __init build_func(u32 arg) { if (arg & ~FUNC_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -268,7 +268,7 @@ static __init u32 build_func(u32 arg) return arg & FUNC_MASK; } -static __init u32 build_set(u32 arg) +static u32 __init build_set(u32 arg) { if (arg & ~SET_MASK) printk(KERN_WARNING "TLB synthesizer field overflow\n"); @@ -315,69 +315,69 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...) } #define I_u1u2u3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ unsigned int b, unsigned int c) \ { \ build_insn(buf, insn##op, a, b, c); \ } #define I_u2u1u3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ unsigned int b, unsigned int c) \ { \ build_insn(buf, insn##op, b, a, c); \ } #define I_u3u1u2(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ unsigned int b, unsigned int c) \ { \ build_insn(buf, insn##op, b, c, a); \ } #define I_u1u2s3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ unsigned int b, signed int c) \ { \ build_insn(buf, insn##op, a, b, c); \ } #define I_u2s3u1(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ signed int b, unsigned int c) \ { \ build_insn(buf, insn##op, c, a, b); \ } #define I_u2u1s3(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ unsigned int b, signed int c) \ { \ build_insn(buf, insn##op, b, a, c); \ } #define I_u1u2(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ unsigned int b) \ { \ build_insn(buf, insn##op, a, b); \ } #define I_u1s2(op) \ - static inline void __init i##op(u32 **buf, unsigned int a, \ + static inline void i##op(u32 **buf, unsigned int a, \ signed int b) \ { \ build_insn(buf, insn##op, a, b); \ } #define I_u1(op) \ - static inline void __init i##op(u32 **buf, unsigned int a) \ + static inline void i##op(u32 **buf, unsigned int a) \ { \ build_insn(buf, insn##op, a); \ } #define I_0(op) \ - static inline void __init i##op(u32 **buf) \ + static inline void i##op(u32 **buf) \ { \ build_insn(buf, insn##op); \ } @@ -457,7 +457,7 @@ struct label { enum label_id lab; }; -static __init void build_label(struct label **lab, u32 *addr, +static void __init build_label(struct label **lab, u32 *addr, enum label_id l) { (*lab)->addr = addr; @@ -526,34 +526,34 @@ L_LA(_r3000_write_probe_fail) #define i_ehb(buf) i_sll(buf, 
0, 0, 3) #ifdef CONFIG_64BIT -static __init int __maybe_unused in_compat_space_p(long addr) +static int __init __maybe_unused in_compat_space_p(long addr) { /* Is this address in 32bit compat space? */ return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); } -static __init int __maybe_unused rel_highest(long val) +static int __init __maybe_unused rel_highest(long val) { return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; } -static __init int __maybe_unused rel_higher(long val) +static int __init __maybe_unused rel_higher(long val) { return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; } #endif -static __init int rel_hi(long val) +static int __init rel_hi(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } -static __init int rel_lo(long val) +static int __init rel_lo(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } -static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr) +static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr) { #ifdef CONFIG_64BIT if (!in_compat_space_p(addr)) { @@ -571,7 +571,7 @@ static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr) i_lui(buf, rs, rel_hi(addr)); } -static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs, +static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr) { i_LA_mostly(buf, rs, addr); @@ -589,7 +589,7 @@ struct reloc { enum label_id lab; }; -static __init void r_mips_pc16(struct reloc **rel, u32 *addr, +static void __init r_mips_pc16(struct reloc **rel, u32 *addr, enum label_id l) { (*rel)->addr = addr; @@ -614,7 +614,7 @@ static inline void __resolve_relocs(struct reloc *rel, struct label *lab) } } -static __init void resolve_relocs(struct reloc *rel, struct label *lab) +static void __init resolve_relocs(struct reloc *rel, struct label *lab) { struct label *l; @@ -624,7 +624,7 @@ static __init void resolve_relocs(struct reloc *rel, struct label *lab) __resolve_relocs(rel, l); } -static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end, +static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != label_invalid; rel++) @@ -632,7 +632,7 @@ static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end, rel->addr += off; } -static __init void move_labels(struct label *lab, u32 *first, u32 *end, +static void __init move_labels(struct label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != label_invalid; lab++) @@ -640,7 +640,7 @@ static __init void move_labels(struct label *lab, u32 *first, u32 *end, lab->addr += off; } -static __init void copy_handler(struct reloc *rel, struct label *lab, +static void __init copy_handler(struct reloc *rel, struct label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); @@ -651,7 +651,7 @@ static __init void copy_handler(struct reloc *rel, struct label *lab, move_labels(lab, first, end, off); } -static __init int __maybe_unused insn_has_bdelay(struct reloc *rel, +static int __init __maybe_unused insn_has_bdelay(struct reloc *rel, u32 *addr) { for (; rel->lab != label_invalid; rel++) { @@ -743,11 +743,11 @@ il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l) * We deliberately chose a buffer size of 128, so we won't scribble * over anything important on overflow before we panic. 
*/ -static __initdata u32 tlb_handler[128]; +static u32 tlb_handler[128] __initdata; /* simply assume worst case size for labels and relocs */ -static __initdata struct label labels[128]; -static __initdata struct reloc relocs[128]; +static struct label labels[128] __initdata; +static struct reloc relocs[128] __initdata; /* * The R3000 TLB handler is simple. @@ -801,7 +801,7 @@ static void __init build_r3000_tlb_refill_handler(void) * other one.To keep things simple, we first assume linear space, * then we relocate it to the final handler layout as needed. */ -static __initdata u32 final_handler[64]; +static u32 final_handler[64] __initdata; /* * Hazards @@ -825,9 +825,9 @@ static __initdata u32 final_handler[64]; * * As if we MIPS hackers wouldn't know how to nop pipelines happy ... */ -static __init void __maybe_unused build_tlb_probe_entry(u32 **p) +static void __init __maybe_unused build_tlb_probe_entry(u32 **p) { - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { /* Found by experiment: R4600 v2.0 needs this, too. */ case CPU_R4600: case CPU_R5000: @@ -849,7 +849,7 @@ static __init void __maybe_unused build_tlb_probe_entry(u32 **p) */ enum tlb_write_entry { tlb_random, tlb_indexed }; -static __init void build_tlb_write_entry(u32 **p, struct label **l, +static void __init build_tlb_write_entry(u32 **p, struct label **l, struct reloc **r, enum tlb_write_entry wmode) { @@ -860,7 +860,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, case tlb_indexed: tlbw = i_tlbwi; break; } - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_R4000PC: case CPU_R4000SC: case CPU_R4000MC: @@ -908,6 +908,8 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, case CPU_4KSC: case CPU_20KC: case CPU_25KF: + case CPU_BCM3302: + case CPU_BCM4710: case CPU_LOONGSON2: if (m4kc_tlbp_war()) i_nop(p); @@ -991,7 +993,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l, * TMP and PTR are scratch. * TMP will be clobbered, PTR will hold the pmd entry. */ -static __init void +static void __init build_get_pmde64(u32 **p, struct label **l, struct reloc **r, unsigned int tmp, unsigned int ptr) { @@ -1052,7 +1054,7 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r, * BVADDR is the faulting address, PTR is scratch. * PTR will hold the pgd for vmalloc. */ -static __init void +static void __init build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, unsigned int bvaddr, unsigned int ptr) { @@ -1116,7 +1118,7 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, * TMP and PTR are scratch. * TMP will be clobbered, PTR will hold the pgd entry. 
*/ -static __init void __maybe_unused +static void __init __maybe_unused build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) { long pgdc = (long)pgd_current; @@ -1151,12 +1153,12 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) #endif /* !CONFIG_64BIT */ -static __init void build_adjust_context(u32 **p, unsigned int ctx) +static void __init build_adjust_context(u32 **p, unsigned int ctx) { unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_VR41XX: case CPU_VR4111: case CPU_VR4121: @@ -1177,7 +1179,7 @@ static __init void build_adjust_context(u32 **p, unsigned int ctx) i_andi(p, ctx, ctx, mask); } -static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) +static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) { /* * Bug workaround for the Nevada. It seems as if under certain @@ -1186,7 +1188,7 @@ static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) * in a different cacheline or a load instruction, probably any * memory reference, is between them. */ - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_NEVADA: i_LW(p, ptr, 0, ptr); GET_CONTEXT(p, tmp); /* get context reg */ @@ -1202,7 +1204,7 @@ static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) i_ADDU(p, ptr, ptr, tmp); /* add in offset */ } -static __init void build_update_entries(u32 **p, unsigned int tmp, +static void __init build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) { /* @@ -1870,7 +1872,7 @@ void __init build_tlb_refill_handler(void) */ static int run_once = 0; - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_R2000: case CPU_R3000: case CPU_R3000A:
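The reindented parity[] table in cerr-sb1.c encodes the even/odd parity of each 8-bit index; moving from 32 to 16 entries per row changes no values. A trivial userspace check (illustrative only, not part of the patch) regenerates the table in the new layout:

#include <stdio.h>

int main(void)
{
	unsigned int i, b, p;

	for (i = 0; i < 256; i++) {
		for (p = 0, b = i; b; b >>= 1)
			p ^= b & 1;		/* p = popcount(i) & 1 */
		printf("%u%s", p, (i & 15) == 15 ? ",\n" : ", ");
	}
	return 0;
}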
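In tlbex.c, each insn_table entry pairs an opcode template built by the M() macro with a mask naming the operand fields build_insn() may fill in. With the standard MIPS field offsets (opcode at bit 26, rs at 21, rt at 16 -- the actual OP_SH/RS_SH/RT_SH constants are outside this hunk, so they are assumed here), the addiu entry behaves like this sketch:

#include <stdint.h>
#include <stdio.h>

#define OP_SH	26	/* assumed standard MIPS field positions */
#define RS_SH	21
#define RT_SH	16

/* Mirrors { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }:
 * the template is just the opcode; callers supply rs, rt and the
 * sign-extended 16-bit immediate. */
static uint32_t encode_addiu(unsigned int rs, unsigned int rt, int16_t simm)
{
	const uint32_t addiu_op = 9;	/* MIPS I opcode for addiu */

	return (addiu_op << OP_SH) | (rs << RS_SH) | (rt << RT_SH) |
	       ((uint16_t)simm & 0xffff);
}

int main(void)
{
	/* addiu $27, $26, 8 -> 0x275b0008, the kind of word the
	 * synthesizer stores into tlb_handler[] */
	printf("0x%08x\n", encode_addiu(26, 27, 8));
	return 0;
}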
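rel_hi() and rel_lo() split an address into the sign-adjusted halves a lui/addiu pair needs: rel_lo() sign-extends the low 16 bits, and rel_hi() pre-compensates for the borrow a negative low half introduces, so (rel_hi(v) << 16) + rel_lo(v) reconstructs any 32-bit v that i_LA_mostly() handles without the 64-bit highest/higher path. A standalone check, copying both helpers verbatim from the hunk:

#include <assert.h>
#include <stdio.h>

static int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	long v = 0x12348765L;

	/* the low half 0x8765 is negative as an s16, so the high half
	 * is bumped to 0x1235: lui 0x1235 + addiu -0x789b == v */
	printf("hi=%#x lo=%d\n", rel_hi(v), rel_lo(v));
	assert(((long)rel_hi(v) << 16) + rel_lo(v) == v);
	return 0;
}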