From 0f6faa2ae9ba93eb01cefa34dcfb9ba305a33ea8 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 3 Jul 2009 08:30:37 -0500
Subject: mm: Prepare decoupling the page fault disabling logic

Add a pagefault_disabled variable to task_struct to allow decoupling
the pagefault-disabled logic from the preempt count.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner

diff --git a/include/linux/sched.h b/include/linux/sched.h
index b02a1b0..c57929c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1453,6 +1453,7 @@ struct task_struct {
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951..9414a1b 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
diff --git a/kernel/fork.c b/kernel/fork.c
index e4cee21..4349b83 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1285,6 +1285,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 32a495a..0b1fbfd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3717,6 +3717,35 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	inc_preempt_count();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+EXPORT_SYMBOL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
--
cgit v0.10.2
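
For context: this patch deliberately changes no behaviour on its own. The new
out-of-line pagefault_disable()/pagefault_enable() still bump the preempt count
exactly as the old inline versions did; current->pagefault_disabled merely
tracks the same nesting in parallel, so that a later patch can switch the fault
handlers from consulting the preempt count to consulting the per-task counter.
Because the calls nest, the new field is a counter rather than a flag.

A rough standalone sketch of the counter semantics being introduced (plain
userspace C, not kernel code; struct task, current_task and the model_* names
are invented for illustration):

#include <assert.h>
#include <stdio.h>

/* Stand-in for the field this patch adds to task_struct. */
struct task {
	int pagefault_disabled;
};

static struct task current_task;	/* models "current" */

/* Nestable disable: bump the per-task counter. */
static void model_pagefault_disable(struct task *t)
{
	t->pagefault_disabled++;
}

/* Matching enable: every disable must be paired, so never go negative. */
static void model_pagefault_enable(struct task *t)
{
	t->pagefault_disabled--;
	assert(t->pagefault_disabled >= 0);
}

/*
 * What a decoupled fault handler would eventually test instead of the
 * preempt count: while the counter is non-zero, take no MM locks and
 * go straight to the exception fixup table.
 */
static int model_faults_allowed(const struct task *t)
{
	return t->pagefault_disabled == 0;
}

int main(void)
{
	model_pagefault_disable(&current_task);
	model_pagefault_disable(&current_task);	/* nesting is legal */
	printf("faults allowed: %d\n", model_faults_allowed(&current_task));
	model_pagefault_enable(&current_task);
	model_pagefault_enable(&current_task);
	printf("faults allowed: %d\n", model_faults_allowed(&current_task));
	return 0;
}

The barrier() calls in the real implementation have no analogue in this model:
they order the counter update against the user-space accesses it protects,
which matters for a real compiler and CPU but not in a single-threaded sketch.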