author     Nick Piggin <nickpiggin@yahoo.com.au>       2005-06-25 21:57:23 (GMT)
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-25 23:24:43 (GMT)
commit     4866cde064afbb6c2a488c265e696879de616daa (patch)
tree       6effad1ab6271129fc607b98273086409876563a /include
parent     48c08d3f8ff94fa118187e4d8d4a5707bb85e59d (diff)
download   linux-4866cde064afbb6c2a488c265e696879de616daa.tar.xz
[PATCH] sched: cleanup context switch locking
Instead of requiring architecture code to interact with the scheduler's locking implementation, provide a couple of defines that the architecture can use to request runqueue-unlocked context switches, and to ask for interrupts to be enabled over the context switch.

This also replaces the "switch_lock" used by these architectures with an oncpu flag (note: a plain int, not a potentially slow bitflag). That eliminates one bus-locked memory operation when context switching, and simplifies the task_running function.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
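The task_running() simplification itself lands in kernel/sched.c, which this include/-only view does not show. As a rough sketch (assuming the 2.6.12-era runqueue_t/task_t typedefs), for an architecture that requests unlocked context switches the test becomes a plain word read instead of a spinlock probe:

static inline int task_running(runqueue_t *rq, task_t *p)
{
#ifdef CONFIG_SMP
	return p->oncpu;	/* plain load; no bus-locked operation */
#else
	return rq->curr == p;	/* UP: the runqueue owner is enough */
#endif
}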
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/system.h      |  30
-rw-r--r--  include/asm-ia64/system.h     |  10
-rw-r--r--  include/asm-mips/system.h     |  10
-rw-r--r--  include/asm-s390/system.h     |  17
-rw-r--r--  include/asm-sparc/system.h    |   4
-rw-r--r--  include/asm-sparc64/system.h  |  14
-rw-r--r--  include/linux/init_task.h     |   1
-rw-r--r--  include/linux/sched.h         |  10
8 files changed, 23 insertions(+), 73 deletions(-)
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 39dd700..3d0d286 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -145,34 +145,12 @@ extern unsigned int user_debug;
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
-#ifdef CONFIG_SMP
/*
- * Define our own context switch locking. This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency. The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
*/
-#define prepare_arch_switch(rq,next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock_irq(&(rq)->lock); \
-} while (0)
-
-#define finish_arch_switch(rq,prev) \
- spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p) \
- ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented. This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next) local_irq_enable()
-#define finish_arch_switch(rq,prev) spin_unlock(&(rq)->lock)
-#define task_running(rq,p) ((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
* switch_to(prev, next) should switch from task `prev' to `next'
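The ARM rationale above maps onto one choice in the core scheduler: with __ARCH_WANT_INTERRUPTS_ON_CTXSW defined (which, per the sched.h hunk below, implies unlocked context switches), the runqueue lock is dropped with interrupts re-enabled before switch_mm() runs. A minimal sketch of that core-side helper, again assuming the era's typedefs — the real code is in kernel/sched.c, outside this diff:

static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
{
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	spin_unlock_irq(&rq->lock);	/* IRQs on across the (slow) cache flush */
#else
	spin_unlock(&rq->lock);		/* IRQs stay masked until the switch finishes */
#endif
}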
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 6f516e7..cd2cf76 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -183,8 +183,6 @@ do { \
#ifdef __KERNEL__
-#define prepare_to_switch() do { } while(0)
-
#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
#else
@@ -274,13 +272,7 @@ extern void ia64_load_extra (struct task_struct *task);
* of that CPU which will not be released, because there we wait for the
* tasklist_lock to become available.
*/
-#define prepare_arch_switch(rq, next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock(&(rq)->lock); \
-} while (0)
-#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
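Because __ARCH_WANT_UNLOCKED_CTXSW releases rq->lock before the switch, the deadlock described above cannot arise: code running during the switch may wait on another CPU's locks without pinning its own runqueue. What remains for the finish side is handing off the oncpu flag and restoring interrupts — a sketch of the kernel/sched.c helper (not part of this diff):

static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
{
#ifdef CONFIG_SMP
	smp_wmb();		/* the completed switch must be visible first */
	prev->oncpu = 0;	/* only now may another CPU run prev */
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}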
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 888fd89..169f3d4 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -422,16 +422,10 @@ extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
extern int stop_a_enabled;
/*
- * Taken from include/asm-ia64/system.h; prevents deadlock on SMP
+ * See include/asm-ia64/system.h; prevents deadlock on SMP
* systems.
*/
-#define prepare_arch_switch(rq, next) \
-do { \
- spin_lock(&(next)->switch_lock); \
- spin_unlock(&(rq)->lock); \
-} while (0)
-#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
-#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define __ARCH_WANT_UNLOCKED_CTXSW
#define arch_align_stack(x) (x)
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index e3cb3ce..b4a9f05 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,29 +104,18 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
-#define prepare_arch_switch(rq, next) do { } while(0)
-#define task_running(rq, p) ((rq)->curr == (p))
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
-
-#define finish_arch_switch(rq, prev) do { \
- set_fs(current->thread.mm_segment); \
- spin_unlock(&(rq)->lock); \
- account_system_vtime(prev); \
- local_irq_enable(); \
-} while (0)
-
#else
+#define account_system_vtime(prev) do { } while (0)
+#endif
#define finish_arch_switch(rq, prev) do { \
set_fs(current->thread.mm_segment); \
- spin_unlock_irq(&(rq)->lock); \
+ account_system_vtime(prev); \
} while (0)
-#endif
-
#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,x) \
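The s390 consolidation leans on a standard kernel idiom: when CONFIG_VIRT_CPU_ACCOUNTING is off, the hook is stubbed as do { } while (0), so the single remaining finish_arch_switch needs no #ifdef of its own. A standalone, compilable demonstration of why statement-like macros take that shape (all names here are invented for the example):

#include <stdio.h>

#ifdef HAVE_ACCOUNTING				/* stands in for CONFIG_VIRT_CPU_ACCOUNTING */
#define account_hook(cpu) do {			\
	printf("account cpu %d\n", (cpu));	\
	fflush(stdout);				\
} while (0)
#else
#define account_hook(cpu) do { } while (0)	/* no-op, still one statement */
#endif

int main(void)
{
	int cpu = 0;

	if (cpu >= 0)
		account_hook(cpu);	/* legal in an unbraced if either way */
	return 0;
}

Both expansions are exactly one statement, so account_hook(cpu); parses correctly in an unbraced if in either configuration; a bare { ... } block followed by the caller's semicolon would not.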
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 80cf20c..898562e 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -101,7 +101,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
* SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
* XXX WTF is the above comment? Found in late teen 2.4.x.
*/
-#define prepare_arch_switch(rq, next) do { \
+#define prepare_arch_switch(next) do { \
__asm__ __volatile__( \
".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
@@ -109,8 +109,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
"save %sp, -0x40, %sp\n\t" \
"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
-#define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
-#define task_running(rq, p) ((rq)->curr == (p))
/* Much care has gone into this code, do not touch it.
*
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index fd12ca3..f9be2c5 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -139,19 +139,13 @@ extern void __flushw_user(void);
#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
-#define prepare_arch_switch(rq, next) \
-do { spin_lock(&(next)->switch_lock); \
- spin_unlock(&(rq)->lock); \
+/* Don't hold the runqueue lock over context switch */
+#define __ARCH_WANT_UNLOCKED_CTXSW
+#define prepare_arch_switch(next) \
+do { \
flushw_all(); \
} while (0)
-#define finish_arch_switch(rq, prev) \
-do { spin_unlock_irq(&(prev)->switch_lock); \
-} while (0)
-
-#define task_running(rq, p) \
- ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-
/* See what happens when you design the chip correctly?
*
* We tell gcc we clobber all non-fixed-usage registers except
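sparc64 shows the two mechanisms composing: it requests unlocked switches and still keeps a private prepare hook for its register-window flush. That works because the core supplies overridable no-op defaults after all architecture headers have had their say — a sketch of those defaults (they live in kernel/sched.c, not in this include/ diff):

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif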
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a6a8c1a..03206a4 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -108,7 +108,6 @@ extern struct group_info init_groups;
.blocked = {{0}}, \
.alloc_lock = SPIN_LOCK_UNLOCKED, \
.proc_lock = SPIN_LOCK_UNLOCKED, \
- .switch_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 36a1078..d27be93 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -368,6 +368,11 @@ struct signal_struct {
#endif
};
+/* Context switch must be unlocked if interrupts are to be enabled */
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+# define __ARCH_WANT_UNLOCKED_CTXSW
+#endif
+
/*
* Bits in flags field of signal_struct.
*/
@@ -594,6 +599,9 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+ int oncpu;
+#endif
int prio, static_prio;
struct list_head run_list;
prio_array_t *array;
@@ -716,8 +724,6 @@ struct task_struct {
spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
spinlock_t proc_lock;
-/* context-switch lock */
- spinlock_t switch_lock;
/* journalling filesystem info */
void *journal_info;
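Tying the sched.h pieces together: oncpu exists only for SMP kernels on unlocked-ctxsw architectures, and its lifecycle spans both halves of the switch. A condensed sketch of that lifecycle (the actual set and clear happen inside kernel/sched.c helpers, not shown here):

	next->oncpu = 1;	/* prepare: publish ownership before rq->lock drops */
	/* ... switch_mm() and switch_to() run here ... */
	smp_wmb();		/* finish: order the switch before the clear */
	prev->oncpu = 0;	/* from here task_running(rq, prev) reads false */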