path: root/kernel/signal.c
author     Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit     86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree       f99d2906b0eafca507f37289e68052fc105cc2dc /kernel/signal.c
parent     07c8b57b111585a617b2b456497fc9b33c00743c (diff)
download   linux-fsl-qoriq-86ba38e6f5f2fbfe9b49e153ea89593b26482019.tar.xz
Reset to 3.12.19
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  135
1 file changed, 17 insertions(+), 118 deletions(-)
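
Editor's note: the first removed hunk below (get_task_cache()/put_task_cache()) is a lock-free, single-slot per-task cache built on cmpxchg(): one compare-and-swap either claims the cached entry or refills the empty slot. As a standalone illustration of that pattern only — not code from this commit — here is a minimal userspace sketch using C11 atomics; struct item, slot, cache_get(), and cache_put() are hypothetical names, not kernel API:

/*
 * Editorial sketch (assumption: C11 atomics as a stand-in for the
 * kernel's cmpxchg()). Mirrors the semantics of the removed
 * get_task_cache()/put_task_cache() pair on a single global slot.
 */
#include <stdatomic.h>
#include <stddef.h>

struct item { int payload; };

static _Atomic(struct item *) slot;

/* Claim the cached item with one CAS; NULL if empty or a race lost. */
static struct item *cache_get(void)
{
	struct item *q = atomic_load(&slot);

	if (q && atomic_compare_exchange_strong(&slot, &q, NULL))
		return q;
	return NULL;
}

/* Stash q if the slot is free; 0 on success, 1 if already occupied. */
static int cache_put(struct item *q)
{
	struct item *expected = NULL;

	return atomic_compare_exchange_strong(&slot, &expected, q) ? 0 : 1;
}

The revert drops this cache entirely, so every sigqueue allocation goes back through kmem_cache_alloc(), matching upstream 3.12.19.
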
diff --git a/kernel/signal.c b/kernel/signal.c
index 3d32f54..ded28b9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -14,7 +14,6 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/sched/rt.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
@@ -350,45 +349,13 @@ static bool task_participate_group_stop(struct task_struct *task)
return false;
}
-#ifdef __HAVE_ARCH_CMPXCHG
-static inline struct sigqueue *get_task_cache(struct task_struct *t)
-{
- struct sigqueue *q = t->sigqueue_cache;
-
- if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
- return NULL;
- return q;
-}
-
-static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-{
- if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
- return 0;
- return 1;
-}
-
-#else
-
-static inline struct sigqueue *get_task_cache(struct task_struct *t)
-{
- return NULL;
-}
-
-static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-{
- return 1;
-}
-
-#endif
-
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
* appropriate lock must be held to stop the target task from exiting
*/
static struct sigqueue *
-__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
- int override_rlimit, int fromslab)
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
struct sigqueue *q = NULL;
struct user_struct *user;
@@ -405,10 +372,7 @@ __sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
- if (!fromslab)
- q = get_task_cache(t);
- if (!q)
- q = kmem_cache_alloc(sigqueue_cachep, flags);
+ q = kmem_cache_alloc(sigqueue_cachep, flags);
} else {
print_dropped_signal(sig);
}
@@ -425,13 +389,6 @@ __sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
return q;
}
-static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
- int override_rlimit)
-{
- return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
-}
-
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
@@ -441,21 +398,6 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
-static void sigqueue_free_current(struct sigqueue *q)
-{
- struct user_struct *up;
-
- if (q->flags & SIGQUEUE_PREALLOC)
- return;
-
- up = q->user;
- if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
- atomic_dec(&up->sigpending);
- free_uid(up);
- } else
- __sigqueue_free(q);
-}
-
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
@@ -469,21 +411,6 @@ void flush_sigqueue(struct sigpending *queue)
}
/*
- * Called from __exit_signal. Flush tsk->pending and
- * tsk->sigqueue_cache
- */
-void flush_task_sigqueue(struct task_struct *tsk)
-{
- struct sigqueue *q;
-
- flush_sigqueue(&tsk->pending);
-
- q = get_task_cache(tsk);
- if (q)
- kmem_cache_free(sigqueue_cachep, q);
-}
-
-/*
* Flush all pending signals for a task.
*/
void __flush_signals(struct task_struct *t)
@@ -635,7 +562,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
- sigqueue_free_current(first);
+ __sigqueue_free(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
@@ -681,8 +608,6 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
int signr;
- WARN_ON_ONCE(tsk != current);
-
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
@@ -1305,8 +1230,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
-static int
-do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+int
+force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
unsigned long int flags;
int ret, blocked, ignored;
@@ -1331,39 +1256,6 @@ do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
-int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-{
-/*
- * On some archs, PREEMPT_RT has to delay sending a signal from a trap
- * since it can not enable preemption, and the signal code's spin_locks
- * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
- * send the signal on exit of the trap.
- */
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- if (in_atomic()) {
- if (WARN_ON_ONCE(t != current))
- return 0;
- if (WARN_ON_ONCE(t->forced_info.si_signo))
- return 0;
-
- if (is_si_special(info)) {
- WARN_ON_ONCE(info != SEND_SIG_PRIV);
- t->forced_info.si_signo = sig;
- t->forced_info.si_errno = 0;
- t->forced_info.si_code = SI_KERNEL;
- t->forced_info.si_pid = 0;
- t->forced_info.si_uid = 0;
- } else {
- t->forced_info = *info;
- }
-
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
- return 0;
- }
-#endif
- return do_force_sig_info(sig, info, t);
-}
-
/*
* Nuke all other threads in the group.
*/
@@ -1394,12 +1286,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
struct sighand_struct *sighand;
for (;;) {
- local_irq_save_nort(*flags);
+ local_irq_save(*flags);
rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
if (unlikely(sighand == NULL)) {
rcu_read_unlock();
- local_irq_restore_nort(*flags);
+ local_irq_restore(*flags);
break;
}
@@ -1410,7 +1302,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
- local_irq_restore_nort(*flags);
+ local_irq_restore(*flags);
}
return sighand;
@@ -1655,8 +1547,7 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
- /* Preallocated sigqueue objects always from the slabcache ! */
- struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+ struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
if (q)
q->flags |= SIGQUEUE_PREALLOC;
@@ -2017,7 +1908,15 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+ *
+ * XXX: implement read_unlock_no_resched().
+ */
+ preempt_disable();
read_unlock(&tasklist_lock);
+ preempt_enable_no_resched();
freezable_schedule();
} else {
/*