author     Ingo Molnar <mingo@elte.hu>              2009-07-03 13:30:37 (GMT)
committer  Scott Wood <scottwood@freescale.com>     2014-05-14 18:37:47 (GMT)
commit     07813f094869d3d7029269c0cd46115b17ab1666 (patch)
tree       4694c70e17a64f97df02f6d28351ae95721763cd
parent     b9781be7057ca04c2bbf6f6d4d4da9925afb1ef2 (diff)
download   linux-fsl-qoriq-07813f094869d3d7029269c0cd46115b17ab1666.tar.xz
mm: Prepare decoupling the page fault disabling logic
Add a pagefault_disabled variable to task_struct to allow decoupling the pagefault-disabled logic from the preempt count.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  include/linux/sched.h   |  1
-rw-r--r--  include/linux/uaccess.h | 33
-rw-r--r--  kernel/fork.c           |  1
-rw-r--r--  mm/memory.c             | 29
4 files changed, 34 insertions, 30 deletions
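
For context on what the new per-task counter enables: a fault handler can eventually test current->pagefault_disabled directly instead of inferring the state from the preempt count (the patch below still bumps the preempt count, so behaviour is unchanged for now). A minimal sketch of that idea; the pagefault_disabled() predicate and the fault-handler fragment are illustrative assumptions, not part of this patch:

/* Illustrative sketch only, not from this patch. */
static inline bool pagefault_disabled(void)
{
	/* the field this patch adds to task_struct */
	return current->pagefault_disabled != 0;
}

/* hypothetical check near the top of an architecture fault handler */
if (unlikely(pagefault_disabled())) {
	/* must not sleep: go straight to the exception fixup table */
	return fixup_exception(regs);
}
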
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e254d8..5f0c31c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1249,6 +1249,7 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
+ int pagefault_disabled;
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951..9414a1b 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
/*
* These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
*/
-static inline void pagefault_disable(void)
-{
- inc_preempt_count();
- /*
- * make sure to have issued the store before a pagefault
- * can hit.
- */
- barrier();
-}
-
-static inline void pagefault_enable(void)
-{
- /*
- * make sure to issue those last loads/stores before enabling
- * the pagefault handler again.
- */
- barrier();
- dec_preempt_count();
- /*
- * make sure we do..
- */
- barrier();
- preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
#ifndef ARCH_HAS_NOCACHE_UACCESS
diff --git a/kernel/fork.c b/kernel/fork.c
index c0174b8..a171a7f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1297,6 +1297,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
+ p->pagefault_disabled = 0;
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 22e67a2..7ad7e6d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3742,6 +3742,35 @@ unlock:
return 0;
}
+void pagefault_disable(void)
+{
+ inc_preempt_count();
+ current->pagefault_disabled++;
+ /*
+ * make sure to have issued the store before a pagefault
+ * can hit.
+ */
+ barrier();
+}
+EXPORT_SYMBOL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+ /*
+ * make sure to issue those last loads/stores before enabling
+ * the pagefault handler again.
+ */
+ barrier();
+ current->pagefault_disabled--;
+ dec_preempt_count();
+ /*
+ * make sure we do..
+ */
+ barrier();
+ preempt_check_resched();
+}
+EXPORT_SYMBOL(pagefault_enable);
+
/*
* By the time we get here, we already hold the mm semaphore
*/
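
Usage note (not part of this commit): callers pair pagefault_disable()/pagefault_enable(), now declared in uaccess.h and defined in mm/memory.c above, around user copies that must not sleep, falling back to a normal faulting copy when the atomic copy fails. A hedged sketch of such a caller; the function name and fallback policy are illustrative assumptions.

/* Illustrative caller, not from this commit. */
static int copy_user_nofault_example(void *dst, const void __user *src,
				     size_t size)
{
	long ret;

	pagefault_disable();
	/* a fault here returns a nonzero remainder via the fixup table */
	ret = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();

	if (ret)
		/* may sleep and fault the page in */
		ret = copy_from_user(dst, src, size);

	return ret ? -EFAULT : 0;
}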