path: root/arch/powerpc/kernel
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-22 02:43:57 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-22 02:43:57 (GMT)
commit     bf67f3a5c456a18f2e8d062f7e88506ef2cd9837 (patch)
tree       2a2324b2572162059307db82f9238eeb25673a77 /arch/powerpc/kernel
parent     226da0dbc84ed97f448523e2a4cb91c27fa68ed9 (diff)
parent     203dacbdca977bedaba61ad2fca75d934060a5d5 (diff)
download   linux-bf67f3a5c456a18f2e8d062f7e88506ef2cd9837.tar.xz
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug cleanups from Thomas Gleixner:
 "This series is merily a cleanup of code copied around in arch/* and
  not changing any of the real cpu hotplug horrors yet. I wish I'd had
  something more substantial for 3.5, but I underestimated the lurking
  horror..."

Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
  um: Remove leftover declaration of alloc_task_struct_node()
  task_allocator: Use config switches instead of magic defines
  sparc: Use common threadinfo allocator
  score: Use common threadinfo allocator
  sh-use-common-threadinfo-allocator
  mn10300: Use common threadinfo allocator
  powerpc: Use common threadinfo allocator
  mips: Use common threadinfo allocator
  hexagon: Use common threadinfo allocator
  m32r: Use common threadinfo allocator
  frv: Use common threadinfo allocator
  cris: Use common threadinfo allocator
  x86: Use common threadinfo allocator
  c6x: Use common threadinfo allocator
  fork: Provide kmemcache based thread_info allocator
  tile: Use common threadinfo allocator
  fork: Provide weak arch_release_[task_struct|thread_info] functions
  fork: Move thread info gfp flags to header
  fork: Remove the weak insanity
  sh: Remove cpu_idle_wait()
  ...
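Most of the powerpc deletions below are possible because the thread_info allocator moves into common code. The following is a minimal sketch of what the "fork: Provide kmemcache based thread_info allocator" and "fork: Provide weak arch_release_..." commits add on the generic side; the real code lives in kernel/fork.c, and the details here are reconstructed from the commit subjects rather than copied verbatim:

/*
 * Sketch (not verbatim) of the common allocator this series adds to
 * kernel/fork.c.  THREADINFO_GFP comes from "fork: Move thread info
 * gfp flags to header".
 */
#if THREAD_SIZE >= PAGE_SIZE
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#else
/*
 * Configurations where the stack is smaller than a page (e.g. powerpc
 * with 64K pages) get a kmem_cache -- exactly what the deleted
 * process.c code below provided privately.
 */
static struct kmem_cache *thread_info_cache;

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}

static void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif

/* Per-arch cleanup hooks, overridable instead of replacing the allocator. */
void __weak arch_release_task_struct(struct task_struct *tsk) { }
void __weak arch_release_thread_info(struct thread_info *ti) { }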
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile     2
-rw-r--r--  arch/powerpc/kernel/idle.c      23
-rw-r--r--  arch/powerpc/kernel/init_task.c 29
-rw-r--r--  arch/powerpc/kernel/process.c   31
-rw-r--r--  arch/powerpc/kernel/smp.c       76
5 files changed, 7 insertions, 154 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f5808a3..83afacd 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -28,7 +28,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
- init_task.o process.o systbl.o idle.o \
+ process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 6d2209a..2099d9a 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -113,29 +113,6 @@ void cpu_idle(void)
}
}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
- * idle loop and start using the new idle loop.
- * Required while changing idle handler on SMP systems.
- * Caller must have changed idle handler to the new value before the call.
- * This window may be larger on shared systems.
- */
-void cpu_idle_wait(void)
-{
- int cpu;
- smp_mb();
-
- /* kick all the CPUs so that they exit out of old idle routine */
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- smp_send_reschedule(cpu);
- }
- put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
int powersave_nap;
#ifdef CONFIG_SYSCTL
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
deleted file mode 100644
index d076d46..0000000
--- a/arch/powerpc/kernel/init_task.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-#include <asm/uaccess.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union __init_task_data =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
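Deleting init_task.c loses nothing: the same objects are now defined once for all architectures. A sketch of that shared definition, assuming the generic init_task instance consolidated elsewhere in this series; the alignment concern from the deleted powerpc comment is handled the same way, via __init_task_data:

/* Sketch of the consolidated definition; previously every arch,
 * including powerpc above, carried a near-identical copy. */
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/* __init_task_data places the union in the init-task data section,
 * which the linker script aligns to THREAD_SIZE. */
union thread_union init_thread_union __init_task_data =
	{ INIT_THREAD_INFO(init_task) };

/* All other task_structs are allocated on slabs in fork.c. */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);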
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4937c96..aa05935 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1252,37 +1252,6 @@ void __ppc64_runlatch_off(void)
}
#endif /* CONFIG_PPC64 */
-#if THREAD_SHIFT < PAGE_SHIFT
-
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
- struct thread_info *ti;
-
- ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
- if (unlikely(ti == NULL))
- return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
- memset(ti, 0, THREAD_SIZE);
-#endif
- return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
- kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
- THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
-}
-
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d9f9441..e4cb343 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,27 +57,9 @@
#define DBG(fmt...)
#endif
-
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
-
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x) (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
struct thread_info *secondary_ti;
@@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu)
}
#endif
-struct create_idle {
- struct work_struct work;
- struct task_struct *idle;
- struct completion done;
- int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
+static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
- struct create_idle *c_idle =
- container_of(work, struct create_idle, work);
-
- c_idle->idle = fork_idle(c_idle->cpu);
- complete(&c_idle->done);
-}
-
-static int __cpuinit create_idle(unsigned int cpu)
-{
- struct thread_info *ti;
- struct create_idle c_idle = {
- .cpu = cpu,
- .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
- };
- INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-
- c_idle.idle = get_idle_for_cpu(cpu);
-
- /* We can't use kernel_thread since we must avoid to
- * reschedule the child. We use a workqueue because
- * we want to fork from a kernel thread, not whatever
- * userspace process happens to be trying to online us.
- */
- if (!c_idle.idle) {
- schedule_work(&c_idle.work);
- wait_for_completion(&c_idle.done);
- } else
- init_idle(c_idle.idle, cpu);
- if (IS_ERR(c_idle.idle)) {
- pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
- return PTR_ERR(c_idle.idle);
- }
- ti = task_thread_info(c_idle.idle);
+ struct thread_info *ti = task_thread_info(idle);
#ifdef CONFIG_PPC64
- paca[cpu].__current = c_idle.idle;
+ paca[cpu].__current = idle;
paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
ti->cpu = cpu;
- current_set[cpu] = ti;
-
- return 0;
+ secondary_ti = current_set[cpu] = ti;
}
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
- /* Make sure we have an idle thread */
- rc = create_idle(cpu);
- if (rc)
- return rc;
-
- secondary_ti = current_set[cpu];
+ cpu_idle_thread_init(cpu, tidle);
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug