author		Dave Jones <davej@redhat.com>	2006-12-12 22:41:41 (GMT)
committer	Dave Jones <davej@redhat.com>	2006-12-12 22:41:41 (GMT)
commit		c4366889dda8110247be59ca41fddb82951a8c26
tree		705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /arch/arm/kernel
parent		db2fb9db5735cc532fd4fc55e94b9a3c3750378e
parent		e1036502e5263851259d147771226161e5ccc85a
Merge ../linus

Conflicts:
	drivers/cpufreq/cpufreq.c
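For reference, a merge commit of this shape is normally produced by pulling the
second parent into the local tree and fixing up the conflicting path by hand.
A minimal sketch of the sequence follows; the ../linus path comes from the
subject line, while the branch name and the actual conflict resolution are
assumptions:

	git pull ../linus master		# merge; stops on the cpufreq.c conflict
	$EDITOR drivers/cpufreq/cpufreq.c	# resolve the conflicting hunks by hand
	git add drivers/cpufreq/cpufreq.c	# mark the path as resolved
	git commit				# record the merge with both parents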
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile           |   4
-rw-r--r--  arch/arm/kernel/apm.c              | 187
-rw-r--r--  arch/arm/kernel/asm-offsets.c      |   1
-rw-r--r--  arch/arm/kernel/ecard.c            |  10
-rw-r--r--  arch/arm/kernel/entry-armv.S       |   9
-rw-r--r--  arch/arm/kernel/head-nommu.S       |   1
-rw-r--r--  arch/arm/kernel/head.S             |   1
-rw-r--r--  arch/arm/kernel/irq.c              |   8
-rw-r--r--  arch/arm/kernel/iwmmxt-notifier.c  |  63
-rw-r--r--  arch/arm/kernel/process.c          |  61
-rw-r--r--  arch/arm/kernel/setup.c            |  12
-rw-r--r--  arch/arm/kernel/signal.c           |   2
-rw-r--r--  arch/arm/kernel/smp.c              |  21
-rw-r--r--  arch/arm/kernel/time.c             |   7
-rw-r--r--  arch/arm/kernel/traps.c            |   7
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S      |   8
-rw-r--r--  arch/arm/kernel/xscale-cp0.c       | 179
17 files changed, 353 insertions(+), 228 deletions(-)
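The diffstat above is taken against the merge's first parent. Assuming a clone
that contains this commit, an equivalent stat can be regenerated with:

	git diff --stat c4366889^ c4366889 -- arch/arm/kernel

where c4366889^ names the first parent (db2fb9db...) of the merge.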
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 1320a0e..ab06a86 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,9 @@ obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
 obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
 AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
 
-obj-$(CONFIG_IWMMXT)		+= iwmmxt.o iwmmxt-notifier.o
+obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
+obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
+obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
index ecf4f94..a11fb9a 100644
--- a/arch/arm/kernel/apm.c
+++ b/arch/arm/kernel/apm.c
@@ -12,7 +12,6 @@
  */
 #include <linux/module.h>
 #include <linux/poll.h>
-#include <linux/timer.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/miscdevice.h>
@@ -26,6 +25,7 @@
 #include <linux/init.h>
 #include <linux/completion.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 
 #include <asm/apm.h>	/* apm_power_info */
 #include <asm/system.h>
@@ -71,7 +71,8 @@ struct apm_user {
 #define SUSPEND_PENDING	1		/* suspend pending read */
 #define SUSPEND_READ	2		/* suspend read, pending ack */
 #define SUSPEND_ACKED	3		/* suspend acked */
-#define SUSPEND_DONE	4		/* suspend completed */
+#define SUSPEND_WAIT	4		/* waiting for suspend */
+#define SUSPEND_DONE	5		/* suspend completed */
 
 	struct apm_queue	queue;
 };
@@ -101,6 +102,7 @@ static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
 static DEFINE_SPINLOCK(kapmd_queue_lock);
 static struct apm_queue kapmd_queue;
 
+static DEFINE_MUTEX(state_lock);
 
 static const char driver_version[] = "1.13";	/* no spaces */
@@ -148,38 +150,60 @@ static void queue_add_event(struct apm_queue *q, apm_event_t event)
 	}
 	q->events[q->event_head] = event;
 }
 
-static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+static void queue_event(apm_event_t event)
 {
-	if (as->suser && as->writer) {
-		switch (event) {
-		case APM_SYS_SUSPEND:
-		case APM_USER_SUSPEND:
-			/*
-			 * If this user already has a suspend pending,
-			 * don't queue another one.
-			 */
-			if (as->suspend_state != SUSPEND_NONE)
-				return;
+	struct apm_user *as;
 
-			as->suspend_state = SUSPEND_PENDING;
-			suspends_pending++;
-			break;
-		}
+	down_read(&user_list_lock);
+	list_for_each_entry(as, &apm_user_list, list) {
+		if (as->reader)
+			queue_add_event(&as->queue, event);
 	}
-	queue_add_event(&as->queue, event);
+	up_read(&user_list_lock);
+	wake_up_interruptible(&apm_waitqueue);
 }
 
-static void queue_event(apm_event_t event, struct apm_user *sender)
+/*
+ * queue_suspend_event - queue an APM suspend event.
+ *
+ * Check that we're in a state where we can suspend.  If not,
+ * return -EBUSY.  Otherwise, queue an event to all "writer"
+ * users.  If there are no "writer" users, return '1' to
+ * indicate that we can immediately suspend.
+ */
+static int queue_suspend_event(apm_event_t event, struct apm_user *sender)
 {
 	struct apm_user *as;
+	int ret = 1;
 
+	mutex_lock(&state_lock);
 	down_read(&user_list_lock);
+
+	/*
+	 * If a thread is still processing, we can't suspend, so reject
+	 * the request.
+	 */
 	list_for_each_entry(as, &apm_user_list, list) {
-		if (as != sender && as->reader)
-			queue_event_one_user(as, event);
+		if (as != sender && as->reader && as->writer && as->suser &&
+		    as->suspend_state != SUSPEND_NONE) {
+			ret = -EBUSY;
+			goto out;
+		}
 	}
+
+	list_for_each_entry(as, &apm_user_list, list) {
+		if (as != sender && as->reader && as->writer && as->suser) {
+			as->suspend_state = SUSPEND_PENDING;
+			suspends_pending++;
+			queue_add_event(&as->queue, event);
+			ret = 0;
+		}
+	}
+ out:
 	up_read(&user_list_lock);
+	mutex_unlock(&state_lock);
 	wake_up_interruptible(&apm_waitqueue);
+	return ret;
 }
 
 static void apm_suspend(void)
@@ -191,17 +215,22 @@ static void apm_suspend(void)
 	 * Anyone on the APM queues will think we're still suspended.
 	 * Send a message so everyone knows we're now awake again.
 	 */
-	queue_event(APM_NORMAL_RESUME, NULL);
+	queue_event(APM_NORMAL_RESUME);
 
 	/*
 	 * Finally, wake up anyone who is sleeping on the suspend.
 	 */
+	mutex_lock(&state_lock);
 	down_read(&user_list_lock);
 	list_for_each_entry(as, &apm_user_list, list) {
-		as->suspend_result = err;
-		as->suspend_state = SUSPEND_DONE;
+		if (as->suspend_state == SUSPEND_WAIT ||
+		    as->suspend_state == SUSPEND_ACKED) {
+			as->suspend_result = err;
+			as->suspend_state = SUSPEND_DONE;
+		}
 	}
 	up_read(&user_list_lock);
+	mutex_unlock(&state_lock);
 
 	wake_up(&apm_suspend_waitqueue);
 }
@@ -227,8 +256,11 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
 		if (copy_to_user(buf, &event, sizeof(event)))
 			break;
 
-		if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+		mutex_lock(&state_lock);
+		if (as->suspend_state == SUSPEND_PENDING &&
+		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
 			as->suspend_state = SUSPEND_READ;
+		mutex_unlock(&state_lock);
 
 		buf += sizeof(event);
 		i -= sizeof(event);
@@ -270,9 +302,13 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 
 	switch (cmd) {
 	case APM_IOC_SUSPEND:
+		mutex_lock(&state_lock);
+
 		as->suspend_result = -EINTR;
 		if (as->suspend_state == SUSPEND_READ) {
+			int pending;
+
 			/*
 			 * If we read a suspend command from /dev/apm_bios,
 			 * then the corresponding APM_IOC_SUSPEND ioctl is
@@ -280,47 +316,73 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 			 */
 			as->suspend_state = SUSPEND_ACKED;
 			suspends_pending--;
+			pending = suspends_pending == 0;
+			mutex_unlock(&state_lock);
+
+			/*
+			 * If there are no further acknowledges required,
+			 * suspend the system.
+			 */
+			if (pending)
+				apm_suspend();
+
+			/*
+			 * Wait for the suspend/resume to complete.  If there
+			 * are pending acknowledges, we wait here for them.
+			 *
+			 * Note: we need to ensure that the PM subsystem does
+			 * not kick us out of the wait when it suspends the
+			 * threads.
+			 */
+			flags = current->flags;
+			current->flags |= PF_NOFREEZE;
+
+			wait_event(apm_suspend_waitqueue,
+				   as->suspend_state == SUSPEND_DONE);
 		} else {
+			as->suspend_state = SUSPEND_WAIT;
+			mutex_unlock(&state_lock);
+
 			/*
 			 * Otherwise it is a request to suspend the system.
 			 * Queue an event for all readers, and expect an
 			 * acknowledge from all writers who haven't already
 			 * acknowledged.
 			 */
-			queue_event(APM_USER_SUSPEND, as);
-		}
-
-		/*
-		 * If there are no further acknowledges required, suspend
-		 * the system.
-		 */
-		if (suspends_pending == 0)
-			apm_suspend();
+			err = queue_suspend_event(APM_USER_SUSPEND, as);
+			if (err < 0) {
+				/*
+				 * Avoid taking the lock here - this
+				 * should be fine.
+				 */
+				as->suspend_state = SUSPEND_NONE;
+				break;
+			}
+
+			if (err > 0)
+				apm_suspend();
 
-		/*
-		 * Wait for the suspend/resume to complete. If there are
-		 * pending acknowledges, we wait here for them.
-		 *
-		 * Note that we need to ensure that the PM subsystem does
-		 * not kick us out of the wait when it suspends the threads.
-		 */
-		flags = current->flags;
-		current->flags |= PF_NOFREEZE;
+			/*
+			 * Wait for the suspend/resume to complete.  If there
+			 * are pending acknowledges, we wait here for them.
+			 *
+			 * Note: we need to ensure that the PM subsystem does
+			 * not kick us out of the wait when it suspends the
+			 * threads.
			 */
+			flags = current->flags;
+			current->flags |= PF_NOFREEZE;
 
-		/*
-		 * Note: do not allow a thread which is acking the suspend
-		 * to escape until the resume is complete.
-		 */
-		if (as->suspend_state == SUSPEND_ACKED)
-			wait_event(apm_suspend_waitqueue,
-				   as->suspend_state == SUSPEND_DONE);
-		else
 			wait_event_interruptible(apm_suspend_waitqueue,
 				as->suspend_state == SUSPEND_DONE);
+		}
 
 		current->flags = flags;
+
+		mutex_lock(&state_lock);
 		err = as->suspend_result;
 		as->suspend_state = SUSPEND_NONE;
+		mutex_unlock(&state_lock);
 		break;
 	}
@@ -330,6 +392,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 static int apm_release(struct inode * inode, struct file * filp)
 {
 	struct apm_user *as = filp->private_data;
+	int pending = 0;
+
 	filp->private_data = NULL;
 
 	down_write(&user_list_lock);
@@ -342,11 +406,14 @@ static int apm_release(struct inode * inode, struct file * filp)
 	 * need to balance suspends_pending, which means the
 	 * possibility of sleeping.
 	 */
+	mutex_lock(&state_lock);
 	if (as->suspend_state != SUSPEND_NONE) {
 		suspends_pending -= 1;
-		if (suspends_pending == 0)
-			apm_suspend();
+		pending = suspends_pending == 0;
 	}
+	mutex_unlock(&state_lock);
+	if (pending)
+		apm_suspend();
 
 	kfree(as);
 	return 0;
@@ -470,6 +537,7 @@ static int kapmd(void *arg)
 {
 	do {
 		apm_event_t event;
+		int ret;
 
 		wait_event_interruptible(kapmd_wait,
 				!queue_empty(&kapmd_queue) || kthread_should_stop());
@@ -489,13 +557,20 @@ static int kapmd(void *arg)
 
 		case APM_LOW_BATTERY:
 		case APM_POWER_STATUS_CHANGE:
-			queue_event(event, NULL);
+			queue_event(event);
 			break;
 
 		case APM_USER_SUSPEND:
 		case APM_SYS_SUSPEND:
-			queue_event(event, NULL);
-			if (suspends_pending == 0)
+			ret = queue_suspend_event(event, NULL);
+			if (ret < 0) {
+				/*
+				 * We were busy.  Try again in 50ms.
+				 */
+				queue_add_event(&kapmd_queue, event);
+				msleep(50);
+			}
+			if (ret > 0)
 				apm_suspend();
 			break;
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index cc2d58d..3c078e3 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/procinfo.h>
 
 /*
  * Make sure that the compiler and target are compatible.
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index b27513a..a786f76 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -529,7 +529,7 @@ static void ecard_dump_irq_state(void)
 	}
 }
 
-static void ecard_check_lockup(struct irqdesc *desc)
+static void ecard_check_lockup(struct irq_desc *desc)
 {
 	static unsigned long last;
 	static int lockup;
@@ -567,7 +567,7 @@ static void ecard_check_lockup(struct irqdesc *desc)
 }
 
 static void
-ecard_irq_handler(unsigned int irq, struct irqdesc *desc)
+ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	ecard_t *ec;
 	int called = 0;
@@ -585,7 +585,7 @@ ecard_irq_handler(unsigned int irq, struct irqdesc *desc)
 			pending = ecard_default_ops.irqpending(ec);
 
 			if (pending) {
-				struct irqdesc *d = irq_desc + ec->irq;
+				struct irq_desc *d = irq_desc + ec->irq;
 				desc_handle_irq(ec->irq, d);
 				called ++;
 			}
@@ -609,7 +609,7 @@ static unsigned char first_set[] =
 };
 
 static void
-ecard_irqexp_handler(unsigned int irq, struct irqdesc *desc)
+ecard_irqexp_handler(unsigned int irq, struct irq_desc *desc)
 {
 	const unsigned int statusmask = 15;
 	unsigned int status;
@@ -1022,7 +1022,7 @@ ecard_probe(int slot, card_type_t type)
 	if (slot < 8) {
 		ec->irq = 32 + slot;
 		set_irq_chip(ec->irq, &ecard_chip);
-		set_irq_handler(ec->irq, do_level_IRQ);
+		set_irq_handler(ec->irq, handle_level_irq);
 		set_irq_flags(ec->irq, IRQF_VALID);
 	}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bd623b7..2db42b1 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -589,10 +589,6 @@ ENTRY(__switch_to)
 	strex	r5, r4, [ip]			@ Clear exclusive monitor
 #endif
 #endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	mra	r4, r5, acc0
-	stmia	ip, {r4, r5}
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
@@ -602,11 +598,6 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
-#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
-	add	r4, r2, #TI_CPU_DOMAIN + 40	@ cpu_context_save->extra
-	ldmib	r4, {r4, r5}
-	mar	acc0, r4, r5
-#endif
 	mov	r5, r0
 	add	r4, r2, #TI_CPU_SAVE
 	ldr	r0, =thread_notify_head
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index f359a18..0119c0d 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -16,7 +16,6 @@
 
 #include <asm/assembler.h>
 #include <asm/mach-types.h>
-#include <asm/procinfo.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index ebc3e74..bda0748 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -16,7 +16,6 @@
 
 #include <asm/assembler.h>
 #include <asm/domain.h>
-#include <asm/procinfo.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/memory.h>
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2c4ff1c..ec01f08 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -112,7 +112,7 @@ static struct irq_desc bad_irq_desc = {
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct irqdesc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_desc + irq;
 
 	/*
 	 * Some hardware gives randomly wrong interrupts.  Rather
@@ -134,7 +134,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
-	struct irqdesc *desc;
+	struct irq_desc *desc;
 	unsigned long flags;
 
 	if (irq >= NR_IRQS) {
@@ -171,7 +171,7 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
 
@@ -190,7 +190,7 @@ void migrate_irqs(void)
 	unsigned int i, cpu = smp_processor_id();
 
 	for (i = 0; i < NR_IRQS; i++) {
-		struct irqdesc *desc = irq_desc + i;
+		struct irq_desc *desc = irq_desc + i;
 
 		if (desc->cpu == cpu) {
 			unsigned int newcpu = any_online_cpu(desc->affinity);
diff --git a/arch/arm/kernel/iwmmxt-notifier.c b/arch/arm/kernel/iwmmxt-notifier.c
deleted file mode 100644
index 0d1a1db..0000000
--- a/arch/arm/kernel/iwmmxt-notifier.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *  linux/arch/arm/kernel/iwmmxt-notifier.c
- *
- *  XScale iWMMXt (Concan) context switching and handling
- *
- *  Initial code:
- *  Copyright (c) 2003, Intel Corporation
- *
- *  Full lazy switching support, optimizations and more, by Nicolas Pitre
- *  Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/thread_notify.h>
-#include <asm/io.h>
-
-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
-	struct thread_info *thread = t;
-
-	switch (cmd) {
-	case THREAD_NOTIFY_FLUSH:
-		/*
-		 * flush_thread() zeroes thread->fpstate, so no need
-		 * to do anything here.
-		 *
-		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
-		 * initialised state information on the first fault.
-		 */
-
-	case THREAD_NOTIFY_RELEASE:
-		iwmmxt_task_release(thread);
-		break;
-
-	case THREAD_NOTIFY_SWITCH:
-		iwmmxt_task_switch(thread);
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block iwmmxt_notifier_block = {
-	.notifier_call	= iwmmxt_do,
-};
-
-static int __init iwmmxt_init(void)
-{
-	thread_register_notifier(&iwmmxt_notifier_block);
-
-	return 0;
-}
-
-late_initcall(iwmmxt_init);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index bf35c17..a9e8f7e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -281,67 +281,6 @@ void show_fpregs(struct user_fp *regs)
 }
 
 /*
- * Task structure and kernel stack allocation.
- */
-struct thread_info_list {
-	unsigned long *head;
-	unsigned int nr;
-};
-
-static DEFINE_PER_CPU(struct thread_info_list, thread_info_list) = { NULL, 0 };
-
-#define EXTRA_TASK_STRUCT	4
-
-struct thread_info *alloc_thread_info(struct task_struct *task)
-{
-	struct thread_info *thread = NULL;
-
-	if (EXTRA_TASK_STRUCT) {
-		struct thread_info_list *th = &get_cpu_var(thread_info_list);
-		unsigned long *p = th->head;
-
-		if (p) {
-			th->head = (unsigned long *)p[0];
-			th->nr -= 1;
-		}
-		put_cpu_var(thread_info_list);
-
-		thread = (struct thread_info *)p;
-	}
-
-	if (!thread)
-		thread = (struct thread_info *)
-			 __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	/*
-	 * The stack must be cleared if you want SYSRQ-T to
-	 * give sensible stack usage information
-	 */
-	if (thread)
-		memzero(thread, THREAD_SIZE);
-#endif
-	return thread;
-}
-
-void free_thread_info(struct thread_info *thread)
-{
-	if (EXTRA_TASK_STRUCT) {
-		struct thread_info_list *th = &get_cpu_var(thread_info_list);
-		if (th->nr < EXTRA_TASK_STRUCT) {
-			unsigned long *p = (unsigned long *)thread;
-			p[0] = (unsigned long)th->head;
-			th->head = p;
-			th->nr += 1;
-			put_cpu_var(thread_info_list);
-			return;
-		}
-		put_cpu_var(thread_info_list);
-	}
-	free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
-}
-
-/*
  * Free current thread data structures etc..
  */
 void exit_thread(void)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6bbd93d..238dd9b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -438,16 +438,19 @@ __early_param("initrd=", early_initrd);
 
 static void __init arm_add_memory(unsigned long start, unsigned long size)
 {
+	struct membank *bank;
+
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
 
-	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
-	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
-	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
-	meminfo.nr_banks += 1;
+	bank = &meminfo.bank[meminfo.nr_banks++];
+
+	bank->start = PAGE_ALIGN(start);
+	bank->size  = size & PAGE_MASK;
+	bank->node  = PHYS_TO_NID(start);
 }
 
 /*
@@ -854,6 +857,7 @@ static const char *hwcap_str[] = {
 	"vfp",
 	"edsp",
 	"java",
+	"iwmmxt",
 	NULL
 };
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 48cf7ff..3843d3b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -11,7 +11,9 @@
 #include <linux/signal.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
+#include <linux/freezer.h>
 
+#include <asm/elf.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 421329f..070bcb7 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -7,6 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -19,6 +20,7 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/seq_file.h>
+#include <linux/irq.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -449,6 +451,7 @@ int smp_call_function(void (*func)(void *info), void *info, int retry,
 	return smp_call_function_on_cpu(func, info, retry, wait,
 					cpu_online_map);
 }
+EXPORT_SYMBOL_GPL(smp_call_function);
 
 void show_ipi_list(struct seq_file *p)
 {
@@ -474,25 +477,26 @@ void show_local_irqs(struct seq_file *p)
 	seq_putc(p, '\n');
 }
 
-static void ipi_timer(struct pt_regs *regs)
+static void ipi_timer(void)
 {
-	int user = user_mode(regs);
-
 	irq_enter();
-	profile_tick(CPU_PROFILING, regs);
-	update_process_times(user);
+	profile_tick(CPU_PROFILING);
+	update_process_times(user_mode(get_irq_regs()));
 	irq_exit();
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
 asmlinkage void do_local_timer(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
 
 	if (local_timer_ack()) {
 		irq_stat[cpu].local_timer_irqs++;
-		ipi_timer(regs);
+		ipi_timer();
 	}
+
+	set_irq_regs(old_regs);
 }
 #endif
@@ -551,6 +555,7 @@ asmlinkage void do_IPI(struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	ipi->ipi_count++;
@@ -574,7 +579,7 @@ asmlinkage void do_IPI(struct pt_regs *regs)
 
 			switch (nextmsg) {
 			case IPI_TIMER:
-				ipi_timer(regs);
+				ipi_timer();
 				break;
 
 			case IPI_RESCHEDULE:
@@ -599,6 +604,8 @@ asmlinkage void do_IPI(struct pt_regs *regs)
 			}
 		} while (msgs);
 	}
+
+	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index c03cab5..6ff5e3f 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -220,10 +220,10 @@ EXPORT_SYMBOL(leds_event);
 #ifdef CONFIG_LEDS_TIMER
 static inline void do_leds(void)
 {
-	static unsigned int count = 50;
+	static unsigned int count = HZ/2;
 
 	if (--count == 0) {
-		count = 50;
+		count = HZ/2;
 		leds_event(led_timer);
 	}
 }
@@ -327,13 +327,12 @@ EXPORT_SYMBOL(restore_time_delta);
 */
void timer_tick(void)
{
-	struct pt_regs *regs = get_irq_regs();
	profile_tick(CPU_PROFILING);
	do_leds();
	do_set_rtc();
	do_timer(1);
#ifndef CONFIG_SMP
-	update_process_times(user_mode(regs));
+	update_process_times(user_mode(get_irq_regs()));
#endif
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bede380..042a129 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -631,12 +631,9 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 	notify_die("unknown data abort code", regs, &info, instr, 0);
 }
 
-void __attribute__((noreturn)) __bug(const char *file, int line, void *data)
+void __attribute__((noreturn)) __bug(const char *file, int line)
 {
-	printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
-	if (data)
-		printk(" - extra data = %p", data);
-	printk("\n");
+	printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line);
 	*(int *)0 = 0;
 
 	/* Avoid "noreturn function does return" */
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 3ca574e..a8fa75e 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -45,13 +45,7 @@ SECTIONS
 			*(.early_param.init)
 		__early_end = .;
 		__initcall_start = .;
-			*(.initcall1.init)
-			*(.initcall2.init)
-			*(.initcall3.init)
-			*(.initcall4.init)
-			*(.initcall5.init)
-			*(.initcall6.init)
-			*(.initcall7.init)
+			INITCALLS
 		__initcall_end = .;
 		__con_initcall_start = .;
 			*(.con_initcall.init)
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
new file mode 100644
index 0000000..180000b
--- /dev/null
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -0,0 +1,179 @@
+/*
+ *  linux/arch/arm/kernel/xscale-cp0.c
+ *
+ *  XScale DSP and iWMMXt coprocessor context switching and handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+static inline void dsp_save_state(u32 *state)
+{
+	__asm__ __volatile__ (
+		"mrrc	p0, 0, %0, %1, c0\n"
+		: "=r" (state[0]), "=r" (state[1]));
+}
+
+static inline void dsp_load_state(u32 *state)
+{
+	__asm__ __volatile__ (
+		"mcrr	p0, 0, %0, %1, c0\n"
+		: : "r" (state[0]), "r" (state[1]));
+}
+
+static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		thread->cpu_context.extra[0] = 0;
+		thread->cpu_context.extra[1] = 0;
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		dsp_save_state(current_thread_info()->cpu_context.extra);
+		dsp_load_state(thread->cpu_context.extra);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dsp_notifier_block = {
+	.notifier_call	= dsp_do,
+};
+
+
+#ifdef CONFIG_IWMMXT
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		/*
+		 * flush_thread() zeroes thread->fpstate, so no need
+		 * to do anything here.
+		 *
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_RELEASE:
+		iwmmxt_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		iwmmxt_task_switch(thread);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+	.notifier_call	= iwmmxt_do,
+};
+#endif
+
+
+static u32 __init xscale_cp_access_read(void)
+{
+	u32 value;
+
+	__asm__ __volatile__ (
+		"mrc	p15, 0, %0, c15, c1, 0\n\t"
+		: "=r" (value));
+
+	return value;
+}
+
+static void __init xscale_cp_access_write(u32 value)
+{
+	u32 temp;
+
+	__asm__ __volatile__ (
+		"mcr	p15, 0, %1, c15, c1, 0\n\t"
+		"mrc	p15, 0, %0, c15, c1, 0\n\t"
+		"mov	%0, %0\n\t"
+		"sub	pc, pc, #4\n\t"
+		: "=r" (temp) : "r" (value));
+}
+
+/*
+ * Detect whether we have a MAC coprocessor (40 bit register) or an
+ * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
+ * into a coprocessor register and reading it back, and checking
+ * whether the upper word survived intact.
+ */
+static int __init cpu_has_iwmmxt(void)
+{
+	u32 lo;
+	u32 hi;
+
+	/*
+	 * This sequence is interpreted by the DSP coprocessor as:
+	 *	mar	acc0, %2, %3
+	 *	mra	%0, %1, acc0
+	 *
+	 * And by the iWMMXt coprocessor as:
+	 *	tmcrr	wR0, %2, %3
+	 *	tmrrc	%0, %1, wR0
+	 */
+	__asm__ __volatile__ (
+		"mcrr	p0, 0, %2, %3, c0\n"
+		"mrrc	p0, 0, %0, %1, c0\n"
+		: "=r" (lo), "=r" (hi)
+		: "r" (0), "r" (0x100));
+
+	return !!hi;
+}
+
+
+/*
+ * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
+ * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching.  If on the other
+ * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
+ * all the time, and save/restore acc0 on context switch in non-lazy
+ * fashion.
+ */
+static int __init xscale_cp0_init(void)
+{
+	u32 cp_access;
+
+	cp_access = xscale_cp_access_read() & ~3;
+	xscale_cp_access_write(cp_access | 1);
+
+	if (cpu_has_iwmmxt()) {
+#ifndef CONFIG_IWMMXT
+		printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
+			"detected, but kernel support is missing.\n");
+#else
+		printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
+		elf_hwcap |= HWCAP_IWMMXT;
+		thread_register_notifier(&iwmmxt_notifier_block);
+#endif
+	} else {
+		printk(KERN_INFO "XScale DSP coprocessor detected.\n");
+		thread_register_notifier(&dsp_notifier_block);
+		cp_access |= 1;
+	}
+
+	xscale_cp_access_write(cp_access);
+
+	return 0;
+}
+
+late_initcall(xscale_cp0_init);