From 317aa408d69a5b833a116317c18c7e957989ce44 Mon Sep 17 00:00:00 2001
From: Anders Larsen
Date: Thu, 4 Mar 2010 11:22:53 +0100
Subject: ARM: 5975/1: AT91 slow-clock suspend: don't wait when turning PLLs off

From: Julien Langer

AT91: when turning off the PLLs during suspend, don't wait for the lock
flag to be set.  Previously the code would always run into the loop
limitation of 1000 iterations, because the flag is never set when
turning the PLLs off.

Comments from Anders Larsen:
(in http://marc.info/?l=linux-kernel&m=127058929724193&w=2)

Signed-off-by: Julien Langer
Signed-off-by: Anders Larsen
Acked-by: Andrew Victor
Signed-off-by: Russell King

diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 9fcbd6c..9c5b48e 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
 	orr	r3, r3, #(1 << 29)		/* bit 29 always set */
 	str	r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
 
-	wait_pllalock
-
 	/* Save PLLB setting and disable it */
 	ldr	r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
 	str	r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
 	mov	r3, #AT91_PMC_PLLCOUNT
 	str	r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
 
-	wait_pllblock
-
 	/* Turn off the main oscillator */
 	ldr	r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
 	bic	r3, r3, #AT91_PMC_MOSCEN
--
cgit v0.10.2


From 7e5a69e83ba7a0d5917ad830f417cba8b8d6aa72 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 29 Mar 2010 21:46:02 +0100
Subject: ARM: 6007/1: fix highmem with VIPT cache and DMA

The VIVT cache of a highmem page is always flushed before the page is
unmapped.  This cache flush is explicit through flush_cache_kmaps() in
flush_all_zero_pkmaps(), or through __cpuc_flush_dcache_area() in
kunmap_atomic().  There is also an implicit flush of those highmem pages
that were part of a process that just terminated, making those pages
free, as the whole VIVT cache has to be flushed on every task switch.
Hence unmapped highmem pages need no cache maintenance in that case.

However unmapped pages may still be cached with a VIPT cache, because
the cache is tagged with physical addresses.  There is no need for a
whole cache flush during task switching for that reason, and despite the
explicit cache flushes in flush_all_zero_pkmaps() and kunmap_atomic(),
some highmem pages that were mapped in user space end up still cached
even when they become unmapped.

So, we do have to perform cache maintenance on those unmapped highmem
pages in the context of DMA when using a VIPT cache.  Unfortunately,
it is not possible to perform that cache maintenance using physical
addresses, as all the L1 cache maintenance coprocessor functions accept
virtual addresses only.  Therefore we have no choice but to set up a
temporary virtual mapping for that purpose.

And of course the explicit cache flushing when unmapping a highmem page
on a system with a VIPT cache can now go, which should increase
performance.

While at it, because the code in __flush_dcache_page() has to be
modified anyway, let's also make sure the mapped highmem pages are
pinned with kmap_high_get() for the duration of the cache maintenance
operation.  Because kunmap() does unmap highmem pages lazily, it was
reported by Gary King that those pages ended up being unmapped during
cache maintenance on SMP, causing segmentation faults.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
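A compact illustration before the diffs: the following stand-alone
user-space model shows the three-way flush decision this patch adds to
__flush_dcache_page() and dma_cache_maint_page().  The struct fields and
helpers below are illustrative stand-ins, not kernel APIs; only the
branch structure mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the highmem flush decision; not kernel code. */
struct modelled_page { bool highmem; bool kmapped; };

static bool cache_is_vipt(void) { return true; }  /* assume a VIPT cache */

static void flush_dcache(const struct modelled_page *pg)
{
        if (!pg->highmem) {
                /* lowmem: flush through the permanent kernel mapping */
                printf("lowmem: direct flush\n");
        } else if (pg->kmapped) {
                /* kmap_high_get() pins an existing kmap for the flush */
                printf("mapped highmem: pin, flush, kunmap_high()\n");
        } else if (cache_is_vipt()) {
                /* unmapped highmem may still hold VIPT cache lines, so a
                 * temporary per-CPU fixmap mapping is set up and restored */
                printf("unmapped highmem: kmap_high_l1_vipt(), flush, restore\n");
        }
        /* on VIVT, unmapped highmem was flushed at unmap time: nothing to do */
}

int main(void)
{
        struct modelled_page lo = { false, false };
        struct modelled_page hi_mapped = { true, true };
        struct modelled_page hi_unmapped = { true, false };

        flush_dcache(&lo);
        flush_dcache(&hi_mapped);
        flush_dcache(&hi_unmapped);
        return 0;
}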
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00..feb988a 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949..c4b2ea3 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4de..f55fa10 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc..13fa536 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			vaddr += offset;
 			op(vaddr, len, dir);
 			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			op(vaddr + offset, len, dir);
+			kunmap_high_l1_vipt(page, saved_pte);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095..c6844cb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7..77b030f 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped.  Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately.  So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context.  As we don't
+ * want to keep interrupt disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed.  If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allow it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
--
cgit v0.10.2
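The depth-counted save/restore scheme described in the comment block of
this patch is easier to see in isolation.  Here is a stand-alone
user-space model of the logic in kmap_high_l1_vipt()/kunmap_high_l1_vipt();
an int stands in for the per-CPU fixmap PTE, and all names are
illustrative rather than kernel APIs.

#include <stdio.h>

/* Model of the fixmap save/restore scheme: 'slot' stands in for the
 * per-CPU fixmap PTE, 'depth' for kmap_high_l1_vipt_depth.  Purely
 * illustrative, not kernel code. */
static int slot;   /* current mapping (0 = empty) */
static int depth;

static void map(int page, int *saved)
{
        depth++;
        *saved = slot;
        if (slot != page)
                slot = page;   /* set_pte_ext() + TLB flush in the kernel */
}

static void unmap(int page, int saved)
{
        depth--;
        if (depth != 0 && page != saved)
                slot = saved;  /* restore the interrupted context's mapping */
        /* at depth 0 the mapping is left in place for cheap reuse */
}

int main(void)
{
        int s1, s2;

        map(1, &s1);           /* outer (e.g. DMA) mapping of page 1 */
        map(2, &s2);           /* "interrupt" maps page 2 on top of it */
        unmap(2, s2);          /* puts page 1 back for the outer context */
        printf("after nested unmap: slot=%d (expect 1)\n", slot);
        unmap(1, s1);
        printf("after outer unmap:  slot=%d (left mapped for reuse)\n", slot);
        return 0;
}

Note how the depth test lets the common, non-nested case skip the PTE
restore entirely, so back-to-back DMA operations on the same page reuse
the mapping without a TLB flush.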
From 5c5cac63851f347d8308d69f1892c4af51d7c1a4 Mon Sep 17 00:00:00 2001
From: Imre Deak
Date: Sun, 11 Apr 2010 15:57:07 +0100
Subject: ARM: 6050/1: VFP: fix the SMP versions of vfp_{sync,flush}_hwstate

From: Imre Deak

Recently the UP versions of these functions were refactored, and as a
side effect it became possible to call them for the current thread.
This isn't true for the SMP versions, however, so fix this up.

Signed-off-by: Imre Deak
Signed-off-by: Russell King

diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a420cb9..315a540 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
-/*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
- */
-#ifdef CONFIG_SMP
-void vfp_sync_hwstate(struct thread_info *thread)
-{
-}
-
-void vfp_flush_hwstate(struct thread_info *thread)
-{
-	/*
-	 * On SMP systems, the VFP state is automatically saved at every
-	 * context switch. We mark the thread VFP state as belonging to a
-	 * non-existent CPU so that the saved one will be reloaded when
-	 * needed.
-	 */
-	thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
 void vfp_sync_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
 
 		last_VFP_context[cpu] = NULL;
 	}
 
+#ifdef CONFIG_SMP
+	/*
+	 * For SMP we still have to take care of the case where the thread
+	 * migrates to another CPU and then back to the original CPU on which
+	 * the last VFP user is still the same thread. Mark the thread VFP
+	 * state as belonging to a non-existent CPU so that the saved one will
+	 * be reloaded in the above case.
+	 */
+	thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
 	put_cpu();
 }
-#endif
 
 #include
--
cgit v0.10.2
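The NR_CPUS trick works because the lazy-reload path compares the CPU
number recorded in the saved state with the current CPU.  A minimal
model of that comparison follows; the real check lives in the VFP
support code, partly in assembly, so the names and structure here are
assumptions for illustration only.

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative model only: shows why setting the saved CPU id to
 * NR_CPUS guarantees a reload.  Field and variable names mimic the
 * kernel's, but this is not the actual vfpmodule.c logic. */
struct vfp_state { int cpu; };

static struct vfp_state *last_VFP_context[NR_CPUS];

static int needs_reload(struct vfp_state *st, int cpu)
{
        /* the hardware still holds this thread's state only if it was
         * last saved on this CPU and nobody else used VFP since */
        return !(st->cpu == cpu && last_VFP_context[cpu] == st);
}

int main(void)
{
        struct vfp_state st = { .cpu = 0 };

        last_VFP_context[0] = &st;
        printf("same cpu, untouched: reload=%d\n", needs_reload(&st, 0));
        st.cpu = NR_CPUS;   /* what vfp_flush_hwstate() does */
        printf("after flush:         reload=%d\n", needs_reload(&st, 0));
        return 0;
}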
From 82c6f5a5b3e91ef4d2fb8725de4b8cf7affd4d61 Mon Sep 17 00:00:00 2001
From: Imre Deak
Date: Sun, 11 Apr 2010 15:58:27 +0100
Subject: ARM: 6051/1: VFP: preserve the HW context when calling signal handlers

From: Imre Deak

Signal handlers can use floating point, so prevent them from corrupting
the main thread's VFP context.

So far there were two signal stack frame formats defined, based on the
VFP implementation, but the user struct used for ptrace covers all
possibilities, so use it for the signal stack too.

Also introduce a new user struct for the VFP exception registers.  In
it, too, fields not relevant to the current VFP architecture are
ignored.

Support to save / restore the exception registers was added by
Will Deacon.

Signed-off-by: Imre Deak
Signed-off-by: Will Deacon
Signed-off-by: Russell King

diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f..47f023a 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
 #endif /* CONFIG_IWMMXT */
 
 #ifdef CONFIG_VFP
-#if __LINUX_ARM_ARCH__ < 6
-/* For ARM pre-v6, we use fstmiax and fldmiax.  This adds one extra
- * word after the registers, and a word of padding at the end for
- * alignment. */
 #define VFP_MAGIC		0x56465001
-#define VFP_STORAGE_SIZE	152
-#else
-#define VFP_MAGIC		0x56465002
-#define VFP_STORAGE_SIZE	144
-#endif
 
 struct vfp_sigframe
 {
 	unsigned long		magic;
 	unsigned long		size;
-	union vfp_state		storage;
-};
+	struct user_vfp		ufp;
+	struct user_vfp_exc	ufp_exc;
+} __attribute__((__aligned__(8)));
+
+/*
+ * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc,
+ * 4 bytes padding.
+ */
+#define VFP_STORAGE_SIZE	sizeof(struct vfp_sigframe)
 
 #endif /* CONFIG_VFP */
 
 /*
@@ -91,7 +90,7 @@ struct aux_sigframe {
 #ifdef CONFIG_IWMMXT
 	struct iwmmxt_sigframe	iwmmxt;
 #endif
-#if 0 && defined CONFIG_VFP /* Not yet saved. */
+#ifdef CONFIG_VFP
 	struct vfp_sigframe	vfp;
 #endif
 	/* Something that isn't a valid magic number for any coprocessor. */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e05..05ac4b0 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
 
 /*
  * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
- * are ignored by the ptrace system call.
+ * are ignored by the ptrace system call and the signal handler.
  */
 struct user_vfp {
 	unsigned long long fpregs[32];
 	unsigned long fpscr;
 };
 
+/*
+ * VFP exception registers exposed to user space during signal delivery.
+ * Fields not relavant to the current VFP architecture are ignored.
+ */
+struct user_vfp_exc {
+	unsigned long	fpexc;
+	unsigned long	fpinst;
+	unsigned long	fpinst2;
+};
+
 #endif /* _ARM_USER_H */
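The size arithmetic in the vfp_sigframe comment above can be checked
with a stand-alone program.  The figures hold only for a 32-bit ARM
EABI target (4-byte unsigned long, 8-byte size and alignment for
unsigned long long); compiled there, this model should print 288,
i.e. 8 + 264 + 12 + 4:

#include <stdio.h>

/* Stand-alone check of the vfp_sigframe layout arithmetic.  The
 * struct definitions mirror the patch; the sizes assume 32-bit ARM
 * EABI and will differ on other ABIs. */
struct user_vfp {
        unsigned long long fpregs[32];  /* 256 bytes */
        unsigned long fpscr;            /* +4, padded to 264 by alignment */
};

struct user_vfp_exc {
        unsigned long fpexc, fpinst, fpinst2;  /* 12 bytes */
};

struct vfp_sigframe {
        unsigned long magic;
        unsigned long size;             /* 8 bytes together */
        struct user_vfp ufp;
        struct user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));      /* tail-padded to a multiple of 8 */

int main(void)
{
        /* 8 + 264 + 12 = 284, rounded up to 288 by the aligned attribute */
        printf("sizeof(struct vfp_sigframe) = %zu\n",
               sizeof(struct vfp_sigframe));
        return 0;
}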
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f3..907d5a6 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 #include "ptrace.h"
 #include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 #endif
 
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	const unsigned long magic = VFP_MAGIC;
+	const unsigned long size = VFP_STORAGE_SIZE;
+	int err = 0;
+
+	vfp_sync_hwstate(thread);
+	__put_user_error(magic, &frame->magic, err);
+	__put_user_error(size, &frame->size, err);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+			      sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	unsigned long magic;
+	unsigned long size;
+	unsigned long fpexc;
+	int err = 0;
+
+	__get_user_error(magic, &frame->magic, err);
+	__get_user_error(size, &frame->size, err);
+
+	if (err)
+		return -EFAULT;
+	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+				sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	h->fpexc = fpexc;
+
+	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	if (!err)
+		vfp_flush_hwstate(thread);
+
+	return err ? -EFAULT : 0;
+}
+
+#endif
+
 /*
  * Do a signal return; undo the signal stack. These are aligned to 64-bit.
  */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 	err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_restore_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= restore_vfp_context(&aux->vfp);
 #endif
 
 	return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_save_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= preserve_vfp_context(&aux->vfp);
 #endif
 	__put_user_error(0, &aux->end_magic, err);
--
cgit v0.10.2


From 3f2d4f561fab4588344cc519fd323382ab950928 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 13 Apr 2010 07:01:46 +0100
Subject: ARM: 6052/1: kdump: make kexec work in interrupt context

When a crash happens in interrupt context there is no userspace
context.  Always use current->active_mm in those cases.

Signed-off-by: Mika Westerberg
Signed-off-by: Russell King

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4223d08..241c24a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
 	pgd_t *pgd;
 	int i;
 
-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
+	/*
+	 * We need to access to user-mode page tables here. For kernel threads
+	 * we don't have any user-mode mappings so we use the context that we
+	 * "borrowed".
+	 */
+	pgd = current->active_mm->pgd;
 
 	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
--
cgit v0.10.2


From b1cdbb5f8342d99b732c5535ee7d2de8e7b2cc2e Mon Sep 17 00:00:00 2001
From: Ernst Schwab
Date: Thu, 4 Mar 2010 10:15:39 +0100
Subject: ARM: 5974/1: arm/mach-at91 Makefile: remove two blanks.

Cosmetic change to the mach-at91 Makefile: remove two blanks introduced
by earlier patches.

Signed-off-by: Ernst Schwab
Signed-off-by: Russell King

diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd57..d400455 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261)	+= at91sam9261.o at91sam926x_time.o at91sam9261_d
 obj-$(CONFIG_ARCH_AT91SAM9G10)	+= at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9263)	+= at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9RL)	+= at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
- obj-$(CONFIG_ARCH_AT91SAM9G20)	+= at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
- obj-$(CONFIG_ARCH_AT91SAM9G45)	+= at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20)	+= at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G45)	+= at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9)	+= at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT572D940HF)	+= at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40)	+= at91x40.o at91x40_time.o
--
cgit v0.10.2
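Returning to the setup_mm_for_reboot() change in ARM 6052/1 above, a
toy model shows why current->active_mm->pgd is always usable, even for
kernel threads crashing in interrupt context.  Types and names below
are illustrative, not the kernel's.

#include <stdio.h>
#include <stddef.h>

/* Toy model: kernel threads have mm == NULL but keep the mm they
 * "borrowed" from the last user task in active_mm, so active_mm->pgd
 * is always valid.  Not kernel code. */
struct mm { const char *owner; int pgd; };
struct task { struct mm *mm; struct mm *active_mm; };

static struct mm init_mm = { "init", 1 };
static struct mm user_mm = { "user", 2 };

static int reboot_pgd(const struct task *current)
{
        /* the old code fell back to init_mm whenever current->mm was
         * NULL, which is wrong when current is a kernel thread still
         * running on a borrowed user mm */
        return current->active_mm->pgd;
}

int main(void)
{
        struct task ktask = { .mm = NULL, .active_mm = &user_mm };
        struct task utask = { .mm = &user_mm, .active_mm = &user_mm };
        struct task idle  = { .mm = NULL, .active_mm = &init_mm };

        printf("kernel thread: pgd=%d\n", reboot_pgd(&ktask));
        printf("user task:     pgd=%d\n", reboot_pgd(&utask));
        printf("idle task:     pgd=%d\n", reboot_pgd(&idle));
        return 0;
}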