Diffstat (limited to 'include/linux/sched.h')

 include/linux/sched.h | 207 ++--------------------------------------------
 1 file changed, 10 insertions(+), 197 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 625a41f..b1e963e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -23,7 +23,6 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <asm/kmap_types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
@@ -53,7 +52,6 @@ struct sched_param {
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
-#include <linux/hardirq.h>
#include <asm/processor.h>
@@ -167,8 +165,11 @@ extern char ___assert_task_state[1 - 2*!!(
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED)
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task) ((task)->exit_state != 0)
+#define task_is_stopped_or_traced(task) \
+ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
@@ -1021,7 +1022,6 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -1064,12 +1064,6 @@ struct task_struct {
#endif
unsigned int policy;
-#ifdef CONFIG_PREEMPT_RT_FULL
- int migrate_disable;
-# ifdef CONFIG_SCHED_DEBUG
- int migrate_disable_atomic;
-# endif
-#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -1165,8 +1159,7 @@ struct task_struct {
struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- raw_spinlock_t vtime_lock;
- seqcount_t vtime_seq;
+ seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
enum {
VTIME_SLEEPING = 0,
@@ -1182,9 +1175,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct task_struct *posix_timer_list;
-#endif
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
@@ -1216,15 +1206,10 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* TODO: move me into ->restart_block ? */
- struct siginfo forced_info;
-#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
@@ -1261,9 +1246,6 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- int pagefault_disabled;
-#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1339,9 +1321,6 @@ struct task_struct {
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
-#ifdef CONFIG_DEBUG_PREEMPT
- unsigned long preempt_disable_ip;
-#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
@@ -1409,12 +1388,6 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- u64 preempt_timestamp_hist;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- long timer_offset;
-#endif
-#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
struct memcg_batch_info {
@@ -1438,19 +1411,11 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head put_rcu;
- int softirq_nestcnt;
- unsigned int softirqs_raised;
-#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
- int kmap_idx;
- pte_t kmap_pte[KM_TYPE_NR];
-# endif
-#endif
};
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
@@ -1463,17 +1428,6 @@ static inline void set_numabalancing_state(bool enabled)
}
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
-#else
-static inline bool cur_pf_disabled(void) { return false; }
-#endif
-
-static inline bool pagefault_disabled(void)
-{
- return in_atomic() || cur_pf_disabled();
-}
-
static inline struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
@@ -1609,15 +1563,6 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __put_task_struct_cb(struct rcu_head *rhp);
-
-static inline void put_task_struct(struct task_struct *t)
-{
- if (atomic_dec_and_test(&t->usage))
- call_rcu(&t->put_rcu, __put_task_struct_cb);
-}
-#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1625,7 +1570,6 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
-#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
@@ -1664,7 +1608,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
-#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -1810,10 +1753,6 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
-int migrate_me(void);
-void tell_sched_cpu_down_begin(int cpu);
-void tell_sched_cpu_down_done(int cpu);
-
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -1826,9 +1765,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
-static inline int migrate_me(void) { return 0; }
-static inline void tell_sched_cpu_down_begin(int cpu) { }
-static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
@@ -2036,7 +1972,6 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_lock_sleeper(struct task_struct * tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2151,24 +2086,12 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
-
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __mmdrop_delayed(struct rcu_head *rhp);
-static inline void mmdrop_delayed(struct mm_struct *mm)
-{
- if (atomic_dec_and_test(&mm->mm_count))
- call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-}
-#else
-# define mmdrop_delayed(mm) mmdrop(mm)
-#endif
-
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
@@ -2451,52 +2374,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-#ifdef CONFIG_PREEMPT_LAZY
-static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-}
-
-static inline int need_resched_lazy(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED) ||
- test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-#else
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-static inline int need_resched_lazy(void) { return 0; }
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-#endif
-
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
@@ -2528,49 +2405,9 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
-static inline bool __task_is_stopped_or_traced(struct task_struct *task)
-{
- if (task->state & (__TASK_STOPPED | __TASK_TRACED))
- return true;
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
- return true;
-#endif
- return false;
-}
-
-static inline bool task_is_stopped_or_traced(struct task_struct *task)
-{
- bool traced_stopped;
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- unsigned long flags;
-
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- traced_stopped = __task_is_stopped_or_traced(task);
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-#else
- traced_stopped = __task_is_stopped_or_traced(task);
-#endif
- return traced_stopped;
-}
-
-static inline bool task_is_traced(struct task_struct *task)
+static inline int need_resched(void)
{
- bool traced = false;
-
- if (task->state & __TASK_TRACED)
- return true;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* in case the task is sleeping on tasklist_lock */
- raw_spin_lock_irq(&task->pi_lock);
- if (task->state & __TASK_TRACED)
- traced = true;
- else if (task->saved_state & __TASK_TRACED)
- traced = true;
- raw_spin_unlock_irq(&task->pi_lock);
-#endif
- return traced;
+ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
/*
@@ -2589,7 +2426,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
@@ -2600,16 +2437,12 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
-#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
-#else
-# define cond_resched_softirq() cond_resched()
-#endif
static inline void cond_resched_rcu(void)
{
@@ -2795,26 +2628,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-static inline int __migrate_disabled(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- return p->migrate_disable;
-#else
- return 0;
-#endif
-}
-
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (p->migrate_disable)
- return cpumask_of(task_cpu(p));
-#endif
-
- return &p->cpus_allowed;
-}
-
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
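
Usage note (not part of the diff itself): the hunks above restore the non-RT
definitions of a few helpers. task_is_traced() and task_is_stopped_or_traced()
become lockless macros over ->state again (->saved_state is removed along with
the pi_lock-protected variants), and tsk_cpus_allowed() collapses to a plain
accessor for ->cpus_allowed with no migrate_disable special case. A minimal
sketch of how callers would use the restored helpers follows; the two wrapper
functions are hypothetical names for illustration, while set_cpus_allowed_ptr()
is the real API declared earlier in this header.

    #include <linux/sched.h>

    /* Hypothetical helper: give @p the same CPU affinity as @parent.
     * tsk_cpus_allowed() is now just &(tsk)->cpus_allowed, so the mask
     * can be read directly, with no RT migrate_disable override. */
    static int pin_task_like_parent(struct task_struct *p,
                                    struct task_struct *parent)
    {
            const struct cpumask *mask = tsk_cpus_allowed(parent);

            return set_cpus_allowed_ptr(p, mask);
    }

    /* Hypothetical helper: task_is_stopped_or_traced() is again a plain
     * test of ->state; no pi_lock and no ->saved_state check needed. */
    static bool report_task_state(struct task_struct *p)
    {
            return task_is_stopped_or_traced(p);
    }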