author | Paul Mundt <lethal@linux-sh.org> | 2009-08-15 00:49:32 (GMT)
committer | Paul Mundt <lethal@linux-sh.org> | 2009-08-15 00:49:32 (GMT)
commit | dde5e3ffb770ef2854bbc32c51a365e932919e19 (patch)
tree | 1b7936b8068f3532892b30a526d23b79bbe401f5 /arch/sh
parent | cbbe2f68f678a90bebeb30b8a7fcd8aed0614879 (diff)
download | linux-dde5e3ffb770ef2854bbc32c51a365e932919e19.tar.xz
sh: rework nommu for generic cache.c use.
This does a bit of reorganizing to allow nommu to use the new
and generic cache.c; no functional changes.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
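
Background for the header moves below: PG_dcache_dirty was previously defined only in the cpu-sh3 and cpu-sh4 cacheflush headers, so a nommu build never saw it; hoisting the alias into asm/cacheflush.h lets the shared cache code refer to it on every configuration. As a purely illustrative sketch (the helper names here are hypothetical and not part of this commit), such a page-flag alias is typically consumed like this:

```c
#include <linux/bitops.h>	/* set_bit(), test_and_clear_bit() */
#include <linux/mm_types.h>	/* struct page */
#include <linux/page-flags.h>	/* PG_arch_1 */
#include <asm/cacheflush.h>	/* PG_dcache_dirty alias added by this patch */

/* Hypothetical helpers, for illustration only. */
static inline void sh_mark_dcache_dirty(struct page *page)
{
	/* Record that this page's data may still be sitting in the D-cache. */
	set_bit(PG_dcache_dirty, &page->flags);
}

static inline int sh_test_and_clear_dcache_dirty(struct page *page)
{
	/* Non-zero means a flush/writeback is still owed for this page. */
	return test_and_clear_bit(PG_dcache_dirty, &page->flags);
}
```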
Diffstat (limited to 'arch/sh')
-rw-r--r-- | arch/sh/include/asm/cacheflush.h | 2
-rw-r--r-- | arch/sh/include/asm/page.h | 7
-rw-r--r-- | arch/sh/include/cpu-sh3/cpu/cacheflush.h | 2
-rw-r--r-- | arch/sh/include/cpu-sh4/cpu/cacheflush.h | 2
-rw-r--r-- | arch/sh/kernel/cpu/init.c | 2
-rw-r--r-- | arch/sh/mm/mmap.c | 2
-rw-r--r-- | arch/sh/mm/tlb-nommu.c | 5
7 files changed, 4 insertions, 18 deletions
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 4bf621e..9ec13fb 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -76,5 +76,7 @@ void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
 void kunmap_coherent(void);
 
+#define PG_dcache_dirty	PG_arch_1
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 847eeab..a316eeb 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -68,18 +68,13 @@ extern void clear_user_page(void *to, unsigned long address, struct page *page);
 extern void copy_user_page(void *to, void *from, unsigned long address,
 			   struct page *page);
 
-#elif defined(CONFIG_MMU)
+#else
 extern void copy_user_highpage(struct page *to, struct page *from,
 			       unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 extern void clear_user_highpage(struct page *page, unsigned long vaddr);
 #define clear_user_highpage	clear_user_highpage
 
-#else
-
-#define clear_user_page(page, vaddr, pg)	clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-
 #endif
 
 /*
diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
index 6485ad5..3b5f3df 100644
--- a/arch/sh/include/cpu-sh3/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
@@ -15,8 +15,6 @@
  * SH4. Unlike the SH4 this is a unified cache so we need to do some work
  * in mmap when 'exec'ing a new binary
  */
-#define PG_dcache_dirty	PG_arch_1
-
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
index 3564f17..76764f0 100644
--- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
@@ -38,6 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_dcache_dirty	PG_arch_1
-
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421..c832fa4 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -268,11 +268,9 @@ asmlinkage void __init sh_cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
-#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
-#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 1b5fdfb..d2984fa 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -14,10 +14,10 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 
-#ifdef CONFIG_MMU
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index c49e9d2..0710ebb 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -50,11 +50,6 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
 
-void __update_cache(struct vm_area_struct *vma,
-		    unsigned long address, pte_t pte)
-{
-}
-
 void __init kmap_coherent_init(void)
 {
 }
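
The kernel/cpu/init.c and mm/mmap.c hunks make shm_align_mask unconditional: the variable is now always defined and exported from mm/mmap.c, and sh_cpu_init() always recomputes it as the larger of the D-cache way size and the page size, minus one. A minimal user-space sketch of that arithmetic, using made-up way-size and page-size values (on real hardware they come from current_cpu_data.dcache.way_size and PAGE_SIZE):

```c
#include <stdio.h>

/* Made-up values for illustration only; the kernel reads these from
 * current_cpu_data.dcache.way_size and PAGE_SIZE. */
#define DCACHE_WAY_SIZE	(16 * 1024)	/* hypothetical 16 KiB D-cache way */
#define PAGE_SZ		(4 * 1024)	/* hypothetical 4 KiB page */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;	/* stands in for the kernel's max_t() */
}

int main(void)
{
	/* Mirrors the expression in sh_cpu_init(): the alignment mask covers
	 * whichever is larger, one cache way or one page. */
	unsigned long shm_align_mask = max_ul(DCACHE_WAY_SIZE - 1, PAGE_SZ - 1);

	printf("shm_align_mask = 0x%lx\n", shm_align_mask);
	return 0;
}
```

Rounding shared-mapping addresses to this mask keeps all mappings of a page at the same cache color, which is what the CONFIG_MMU path in mmap.c relies on to avoid aliases.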