author    Scott Wood <scottwood@freescale.com>  2013-10-29 19:50:27 (GMT)
committer Scott Wood <scottwood@freescale.com>  2013-10-29 19:50:37 (GMT)
commit    d0ebef8230e267ec47d4d4a65fe3262e2ebb8026 (patch)
tree      24b8bb342576f543dac42d59821c4feb7ce07453 /include/linux
parent    041f2bc64a985b30328de4cb596f04fd913a85de (diff)
download  linux-fsl-qoriq-d0ebef8230e267ec47d4d4a65fe3262e2ebb8026.tar.xz
Revert to v3.8 (no RT, no stable)
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ata.h  2
-rw-r--r--  include/linux/blkdev.h  2
-rw-r--r--  include/linux/buffer_head.h  44
-rw-r--r--  include/linux/capability.h  2
-rw-r--r--  include/linux/cgroup.h  3
-rw-r--r--  include/linux/completion.h  8
-rw-r--r--  include/linux/console.h  3
-rw-r--r--  include/linux/cpu.h  4
-rw-r--r--  include/linux/delay.h  6
-rw-r--r--  include/linux/device-mapper.h  4
-rw-r--r--  include/linux/fs_struct.h  2
-rw-r--r--  include/linux/ftrace.h  3
-rw-r--r--  include/linux/ftrace_event.h  4
-rw-r--r--  include/linux/hardirq.h  16
-rw-r--r--  include/linux/highmem.h  28
-rw-r--r--  include/linux/hrtimer.h  16
-rw-r--r--  include/linux/hugetlb.h  19
-rw-r--r--  include/linux/idr.h  2
-rw-r--r--  include/linux/if_vlan.h  6
-rw-r--r--  include/linux/init_task.h  7
-rw-r--r--  include/linux/interrupt.h  65
-rw-r--r--  include/linux/ipc_namespace.h  2
-rw-r--r--  include/linux/irq.h  5
-rw-r--r--  include/linux/irqdesc.h  1
-rw-r--r--  include/linux/irqflags.h  29
-rw-r--r--  include/linux/jbd2.h  2
-rw-r--r--  include/linux/jbd_common.h  24
-rw-r--r--  include/linux/jiffies.h  1
-rw-r--r--  include/linux/jump_label.h  3
-rw-r--r--  include/linux/kdb.h  3
-rw-r--r--  include/linux/kernel.h  1
-rw-r--r--  include/linux/kvm_host.h  2
-rw-r--r--  include/linux/kvm_types.h  1
-rw-r--r--  include/linux/lglock.h  19
-rw-r--r--  include/linux/libata.h  1
-rw-r--r--  include/linux/list.h  11
-rw-r--r--  include/linux/llist.h  25
-rw-r--r--  include/linux/locallock.h  253
-rw-r--r--  include/linux/mfd/rtsx_pci.h  2
-rw-r--r--  include/linux/mm.h  48
-rw-r--r--  include/linux/mm_types.h  8
-rw-r--r--  include/linux/mount.h  2
-rw-r--r--  include/linux/mtd/nand.h  7
-rw-r--r--  include/linux/mutex.h  21
-rw-r--r--  include/linux/mutex_rt.h  84
-rw-r--r--  include/linux/netdevice.h  5
-rw-r--r--  include/linux/netfilter/x_tables.h  7
-rw-r--r--  include/linux/notifier.h  26
-rw-r--r--  include/linux/of.h  2
-rw-r--r--  include/linux/page_cgroup.h  15
-rw-r--r--  include/linux/percpu.h  25
-rw-r--r--  include/linux/perf_event.h  6
-rw-r--r--  include/linux/pid.h  1
-rw-r--r--  include/linux/platform_data/cpsw.h  6
-rw-r--r--  include/linux/pps_kernel.h  17
-rw-r--r--  include/linux/preempt.h  90
-rw-r--r--  include/linux/printk.h  9
-rw-r--r--  include/linux/pstore.h  6
-rw-r--r--  include/linux/quota.h  1
-rw-r--r--  include/linux/radix-tree.h  8
-rw-r--r--  include/linux/random.h  2
-rw-r--r--  include/linux/rcupdate.h  26
-rw-r--r--  include/linux/rcutree.h  18
-rw-r--r--  include/linux/rtmutex.h  38
-rw-r--r--  include/linux/rwlock_rt.h  123
-rw-r--r--  include/linux/rwlock_types.h  7
-rw-r--r--  include/linux/rwlock_types_rt.h  33
-rw-r--r--  include/linux/rwsem.h  6
-rw-r--r--  include/linux/rwsem_rt.h  128
-rw-r--r--  include/linux/sched.h  192
-rw-r--r--  include/linux/seqlock.h  236
-rw-r--r--  include/linux/signal.h  1
-rw-r--r--  include/linux/skbuff.h  27
-rw-r--r--  include/linux/slub_def.h  2
-rw-r--r--  include/linux/smp.h  3
-rw-r--r--  include/linux/spinlock.h  12
-rw-r--r--  include/linux/spinlock_api_smp.h  4
-rw-r--r--  include/linux/spinlock_rt.h  168
-rw-r--r--  include/linux/spinlock_types.h  79
-rw-r--r--  include/linux/spinlock_types_nort.h  33
-rw-r--r--  include/linux/spinlock_types_raw.h  56
-rw-r--r--  include/linux/spinlock_types_rt.h  51
-rw-r--r--  include/linux/spinlock_up.h  29
-rw-r--r--  include/linux/srcu.h  14
-rw-r--r--  include/linux/ssb/ssb_driver_chipcommon.h  2
-rw-r--r--  include/linux/sysctl.h  1
-rw-r--r--  include/linux/thermal.h  2
-rw-r--r--  include/linux/timekeeper_internal.h  4
-rw-r--r--  include/linux/timer.h  2
-rw-r--r--  include/linux/uaccess.h  41
-rw-r--r--  include/linux/uprobes.h  1
-rw-r--r--  include/linux/usb/cdc_ncm.h  1
-rw-r--r--  include/linux/usb/serial.h  2
-rw-r--r--  include/linux/user_namespace.h  4
-rw-r--r--  include/linux/vmstat.h  4
-rw-r--r--  include/linux/vt_kern.h  3
-rw-r--r--  include/linux/wait-simple.h  199
97 files changed, 391 insertions, 2188 deletions
diff --git a/include/linux/ata.h b/include/linux/ata.h
index ee0bd95..8f7a3d6 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
}
}
-static inline int atapi_command_packet_set(const u16 *dev_id)
+static inline bool atapi_command_packet_set(const u16 *dev_id)
{
return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1dbdb1a..f94bc83 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -836,7 +836,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
unsigned int cmd_flags)
{
if (unlikely(cmd_flags & REQ_DISCARD))
- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ return q->limits.max_discard_sectors;
if (unlikely(cmd_flags & REQ_WRITE_SAME))
return q->limits.max_write_same_sectors;
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 3f8e27b..458f497 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -72,52 +72,8 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
-#ifdef CONFIG_PREEMPT_RT_BASE
- spinlock_t b_uptodate_lock;
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
- spinlock_t b_state_lock;
- spinlock_t b_journal_head_lock;
-#endif
-#endif
};
-static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-{
- unsigned long flags;
-
-#ifndef CONFIG_PREEMPT_RT_BASE
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-#else
- spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-#endif
- return flags;
-}
-
-static inline void
-bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
- local_irq_restore(flags);
-#else
- spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-#endif
-}
-
-static inline void buffer_head_init_locks(struct buffer_head *bh)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
- spin_lock_init(&bh->b_uptodate_lock);
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
- spin_lock_init(&bh->b_state_lock);
- spin_lock_init(&bh->b_journal_head_lock);
-#endif
-#endif
-}
-
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
diff --git a/include/linux/capability.h b/include/linux/capability.h
index d9a4f7f..98503b7 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -35,7 +35,6 @@ struct cpu_vfs_cap_data {
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-struct file;
struct inode;
struct dentry;
struct user_namespace;
@@ -212,7 +211,6 @@ extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool nsown_capable(int cap);
extern bool inode_capable(const struct inode *inode, int cap);
-extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 2322df7..7d73905 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -303,6 +303,9 @@ struct cftype {
/* CFTYPE_* flags */
unsigned int flags;
+ /* file xattrs */
+ struct simple_xattrs xattrs;
+
int (*open)(struct inode *inode, struct file *file);
ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
struct file *file,
diff --git a/include/linux/completion.h b/include/linux/completion.h
index ebb6565..51494e6 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -8,7 +8,7 @@
* See kernel/sched.c for details.
*/
-#include <linux/wait-simple.h>
+#include <linux/wait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -24,11 +24,11 @@
*/
struct completion {
unsigned int done;
- struct swait_head wait;
+ wait_queue_head_t wait;
};
#define COMPLETION_INITIALIZER(work) \
- { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
@@ -73,7 +73,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_swait_head(&x->wait);
+ init_waitqueue_head(&x->wait);
}
extern void wait_for_completion(struct completion *);
diff --git a/include/linux/console.h b/include/linux/console.h
index 4a6948a..dedb082 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -77,9 +77,7 @@ extern const struct consw prom_con; /* SPARC PROM console */
int con_is_bound(const struct consw *csw);
int register_con_driver(const struct consw *csw, int first, int last);
int unregister_con_driver(const struct consw *csw);
-int do_unregister_con_driver(const struct consw *csw);
int take_over_console(const struct consw *sw, int first, int last, int deflt);
-int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
#ifdef CONFIG_HW_CONSOLE
int con_debug_enter(struct vc_data *vc);
@@ -141,7 +139,6 @@ struct console {
for (con = console_drivers; con != NULL; con = con->next)
extern int console_set_on_cmdline;
-extern struct console *early_console;
extern int add_preferred_console(char *name, int idx, char *options);
extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7781c9e..ce7a074 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -175,8 +175,6 @@ extern struct bus_type cpu_subsys;
extern void get_online_cpus(void);
extern void put_online_cpus(void);
-extern void pin_current_cpu(void);
-extern void unpin_current_cpu(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -200,8 +198,6 @@ static inline void cpu_hotplug_driver_unlock(void)
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
-static inline void pin_current_cpu(void) { }
-static inline void unpin_current_cpu(void) { }
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/include/linux/delay.h b/include/linux/delay.h
index e23a7c0..a6ecb34 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -52,10 +52,4 @@ static inline void ssleep(unsigned int seconds)
msleep(seconds * 1000);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define cpu_chill() msleep(1)
-#else
-# define cpu_chill() cpu_relax()
-#endif
-
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a5cda3e..bf6afa2 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -68,8 +68,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
-typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+ unsigned status_flags, char *result, unsigned maxlen);
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 324f931..d0ae3a8 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -50,6 +50,4 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
spin_unlock(&fs->lock);
}
-extern bool current_chrooted(void);
-
#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 616603d..92691d8 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -394,6 +394,7 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
+loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
int ftrace_regex_release(struct inode *inode, struct file *file);
void __init
@@ -566,8 +567,6 @@ static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */
-loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
-
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 16ad63d..a3d4895 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -49,9 +49,7 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
- unsigned short migrate_disable;
- unsigned short padding;
- unsigned char preempt_lazy_count;
+ int padding;
};
#define FTRACE_MAX_EVENT \
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index dfa97de..624ef3f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -61,11 +61,7 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
@@ -78,17 +74,10 @@
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#else
-# define softirq_count() (0UL)
-extern int in_serving_softirq(void);
-#endif
-
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
@@ -98,6 +87,7 @@ extern int in_serving_softirq(void);
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 84223de..ef788b5 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,7 +7,6 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -86,51 +85,32 @@ static inline void __kunmap_atomic(void *addr)
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-#ifndef CONFIG_PREEMPT_RT_FULL
DECLARE_PER_CPU(int, __kmap_atomic_idx);
-#endif
static inline int kmap_atomic_idx_push(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(in_irq() && !irqs_disabled());
BUG_ON(idx > KM_TYPE_NR);
-# endif
- return idx;
-#else
- current->kmap_idx++;
- BUG_ON(current->kmap_idx > KM_TYPE_NR);
- return current->kmap_idx - 1;
#endif
+ return idx;
}
static inline int kmap_atomic_idx(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
return __this_cpu_read(__kmap_atomic_idx) - 1;
-#else
- return current->kmap_idx - 1;
-#endif
}
static inline void kmap_atomic_idx_pop(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
int idx = __this_cpu_dec_return(__kmap_atomic_idx);
BUG_ON(idx < 0);
-# else
- __this_cpu_dec(__kmap_atomic_idx);
-# endif
#else
- current->kmap_idx--;
-# ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(current->kmap_idx < 0);
-# endif
+ __this_cpu_dec(__kmap_atomic_idx);
#endif
}
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 113bcf1..cc07d27 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,11 +111,6 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
- struct list_head cb_entry;
- int irqsafe;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- ktime_t praecox;
-#endif
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -152,7 +147,6 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
- struct list_head expired;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
@@ -195,9 +189,6 @@ struct hrtimer_cpu_base {
unsigned long nr_hangs;
ktime_t max_hang_time;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- wait_queue_head_t wait;
-#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
@@ -391,13 +382,6 @@ static inline int hrtimer_restart(struct hrtimer *timer)
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
-/* Softirq preemption could deadlock timer removal */
-#ifdef CONFIG_PREEMPT_RT_BASE
- extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
-#else
-# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
-#endif
-
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index db695d5..0c80d3f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -185,7 +185,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
+struct file *hugetlb_file_setup(const char *name, unsigned long addr,
+ size_t size, vm_flags_t acct,
struct user_struct **user, int creat_flags,
int page_size_log);
@@ -204,8 +205,8 @@ static inline int is_file_hugepages(struct file *file)
#define is_file_hugepages(file) 0
static inline struct file *
-hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
+hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
+ vm_flags_t acctflag, struct user_struct **user, int creat_flags,
int page_size_log)
{
return ERR_PTR(-ENOSYS);
@@ -283,13 +284,6 @@ static inline struct hstate *hstate_file(struct file *f)
return hstate_inode(f->f_dentry->d_inode);
}
-static inline struct hstate *hstate_sizelog(int page_size_log)
-{
- if (!page_size_log)
- return &default_hstate;
- return size_to_hstate(1 << page_size_log);
-}
-
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
return hstate_file(vma->vm_file);
@@ -354,12 +348,11 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-#else /* CONFIG_HUGETLB_PAGE */
+#else
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
@@ -374,6 +367,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e5eb125..de7e190 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -136,7 +136,7 @@ struct ida {
struct ida_bitmap *free_bitmap;
};
-#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
+#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, }
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 218a3b6..d06cc5c 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -331,7 +331,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
__be16 proto;
- unsigned short *rawp;
+ unsigned char *rawp;
/*
* Was a VLAN packet, grab the encapsulated protocol, which the layer
@@ -344,8 +344,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
return;
}
- rawp = (unsigned short *)(vhdr + 1);
- if (*rawp == 0xFFFF)
+ rawp = skb->data;
+ if (*(unsigned short *) rawp == 0xFFFF)
/*
* This is a magic hack to spot IPX packets. Older Novell
* breaks the protocol design and runs IPX over 802.3 without
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 10f32ab..6d087c5 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -141,12 +141,6 @@ extern struct task_group root_task_group;
# define INIT_PERF_EVENTS(tsk)
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define INIT_TIMER_LIST .posix_timer_list = NULL,
-#else
-# define INIT_TIMER_LIST
-#endif
-
#define INIT_TASK_COMM "swapper"
/*
@@ -202,7 +196,6 @@ extern struct task_group root_task_group;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
- INIT_TIMER_LIST \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 11bdb1e..5fa5afe 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -58,7 +58,6 @@
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
* resume time.
- * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SHARED 0x00000080
@@ -72,7 +71,6 @@
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
-#define IRQF_NO_SOFTIRQ_CALL 0x00040000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -213,7 +211,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
-# define local_irq_enable_in_hardirq() local_irq_enable_nort()
+# define local_irq_enable_in_hardirq() local_irq_enable()
#endif
extern void disable_irq_nosync(unsigned int irq);
@@ -385,13 +383,9 @@ static inline int disable_irq_wake(unsigned int irq)
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
- extern bool force_irqthreads;
-# else
-# define force_irqthreads (true)
-# endif
+extern bool force_irqthreads;
#else
-#define force_irqthreads (false)
+#define force_irqthreads (0)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
@@ -447,14 +441,8 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
-#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-static inline void thread_do_softirq(void) { do_softirq(); }
-#else
-extern void thread_do_softirq(void);
-#endif
-
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -462,8 +450,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
-extern void softirq_check_pending_idle(void);
-
/* This is the worklist that queues up per-cpu softirq work.
*
* send_remote_sendirq() adds work to these lists, and
@@ -504,9 +490,8 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU, it is rescheduled
- for later.
- * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * If this tasklet is already running on another CPU (or schedule is called
+ from tasklet itself), it is rescheduled for later.
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
@@ -531,36 +516,27 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
- TASKLET_STATE_PENDING /* Tasklet is pending */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline int tasklet_tryunlock(struct tasklet_struct *t)
-{
- return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
-}
-
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-extern void tasklet_unlock_wait(struct tasklet_struct *t);
-
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
#else
#define tasklet_trylock(t) 1
-#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -609,8 +585,17 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
-extern void tasklet_enable(struct tasklet_struct *t);
-extern void tasklet_hi_enable(struct tasklet_struct *t);
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->count);
+}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
@@ -642,12 +627,6 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
tasklet_kill(&ttimer->tasklet);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void softirq_early_init(void);
-#else
-static inline void softirq_early_init(void) { }
-#endif
-
/*
* Autoprobing for irqs:
*
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c4d870b..ae221a7 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -43,8 +43,8 @@ struct ipc_namespace {
size_t shm_ctlmax;
size_t shm_ctlall;
- unsigned long shm_tot;
int shm_ctlmni;
+ int shm_tot;
/*
* Defines whether IPC_RMID is forced for _all_ shm segments regardless
* of shmctl()
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3929bbe..fdf2c4a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,7 +70,6 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
- * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -95,14 +94,12 @@ enum {
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_NO_SOFTIRQ_CALL = (1 << 18),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_NO_SOFTIRQ_CALL)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index a7edc47..623325e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -52,7 +52,6 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- u64 random_ip;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index a52b35d..d176d65 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -25,6 +25,8 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
@@ -37,15 +39,9 @@
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-#endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
-#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -151,23 +147,4 @@
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
-/*
- * local_irq* variants depending on RT/!RT
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define local_irq_disable_nort() do { } while (0)
-# define local_irq_enable_nort() do { } while (0)
-# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0)
-# define local_irq_restore_nort(flags) do { (void)(flags); } while (0)
-# define local_irq_disable_rt() local_irq_disable()
-# define local_irq_enable_rt() local_irq_enable()
-#else
-# define local_irq_disable_nort() local_irq_disable()
-# define local_irq_enable_nort() local_irq_enable()
-# define local_irq_save_nort(flags) local_irq_save(flags)
-# define local_irq_restore_nort(flags) local_irq_restore(flags)
-# define local_irq_disable_rt() do { } while (0)
-# define local_irq_enable_rt() do { } while (0)
-#endif
-
#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 383bef0..e30b663 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -498,7 +498,6 @@ struct transaction_s
T_COMMIT,
T_COMMIT_DFLUSH,
T_COMMIT_JFLUSH,
- T_COMMIT_CALLBACK,
T_FINISHED
} t_state;
@@ -1211,7 +1210,6 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_journal_force_commit_nested(journal_t *journal);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
-int jbd2_complete_transaction(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 0dbc151..6133679 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -39,56 +39,32 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
-#else
- spin_lock(&bh->b_state_lock);
-#endif
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
-#else
- return spin_trylock(&bh->b_state_lock);
-#endif
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
-#else
- return spin_is_locked(&bh->b_state_lock);
-#endif
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
-#else
- spin_unlock(&bh->b_state_lock);
-#endif
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
-#else
- spin_lock(&bh->b_journal_head_lock);
-#endif
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
-#else
- spin_unlock(&bh->b_journal_head_lock);
-#endif
}
#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 8fb8edf..82ed068 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -75,6 +75,7 @@ extern int register_refined_jiffies(long clock_tick_rate);
*/
extern u64 __jiffy_data jiffies_64;
extern unsigned long volatile __jiffy_data jiffies;
+extern seqlock_t jiffies_lock;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 40c876b..0976fc4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -50,8 +50,7 @@
#include <linux/compiler.h>
#include <linux/workqueue.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
- !defined(CONFIG_PREEMPT_BASE)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
struct static_key {
atomic_t enabled;
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 680ad23..7f6fe6e 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -115,7 +115,7 @@ extern int kdb_trap_printk;
extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-#define in_kdb_printk() (kdb_trap_printk)
+
extern void kdb_init(int level);
/* Access to kdb specific polling devices */
@@ -150,7 +150,6 @@ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
-#define in_kdb_printk() (0)
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 8b3086d..c566927 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -412,7 +412,6 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
- SYSTEM_SUSPEND,
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ffdf8b7..2c497ab 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -511,7 +511,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- gpa_t gpa, unsigned long len);
+ gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b0bcce0..fa7cc72 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -71,7 +71,6 @@ struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
unsigned long hva;
- unsigned long len;
struct kvm_memory_slot *memslot;
};
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index d2c0d6d..0d24e93 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -42,37 +42,22 @@
#endif
struct lglock {
-#ifndef CONFIG_PREEMPT_RT_FULL
arch_spinlock_t __percpu *lock;
-#else
- struct rt_mutex __percpu *lock;
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lock_key;
struct lockdep_map lock_dep_map;
#endif
};
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define DEFINE_LGLOCK(name) \
+#define DEFINE_LGLOCK(name) \
static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
= __ARCH_SPIN_LOCK_UNLOCKED; \
struct lglock name = { .lock = &name ## _lock }
-# define DEFINE_STATIC_LGLOCK(name) \
+#define DEFINE_STATIC_LGLOCK(name) \
static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
= __ARCH_SPIN_LOCK_UNLOCKED; \
static struct lglock name = { .lock = &name ## _lock }
-#else
-
-# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
- struct lglock name = { .lock = &name ## _lock }
-
-# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
- static struct lglock name = { .lock = &name ## _lock }
-#endif
void lg_lock_init(struct lglock *lg, char *name);
void lg_local_lock(struct lglock *lg);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0621bca..649e5f8 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -398,7 +398,6 @@ enum {
ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/list.h b/include/linux/list.h
index 7a9851b..cc6d2aa 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,17 +362,6 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((ptr)->next, type, member)
/**
- * list_last_entry - get the last element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_last_entry(ptr, type, member) \
- list_entry((ptr)->prev, type, member)
-
-/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
diff --git a/include/linux/llist.h b/include/linux/llist.h
index a5199f6..d0ab98f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list)
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
+ * llist_for_each_entry_safe - iterate safely against remove over some entries
+ * of lock-less list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as a temporary storage.
+ * @node: the fist entry of deleted list entries.
+ * @member: the name of the llist_node with the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head. This variant allows removal of entries
+ * as we iterate.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member) \
+ for ((pos) = llist_entry((node), typeof(*(pos)), member), \
+ (n) = (pos)->member.next; \
+ &(pos)->member != NULL; \
+ (pos) = llist_entry(n, typeof(*(pos)), member), \
+ (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
+
+/**
* llist_empty - tests whether a lock-less list is empty
* @head: the list to test
*
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
deleted file mode 100644
index a5eea5d..0000000
--- a/include/linux/locallock.h
+++ /dev/null
@@ -1,253 +0,0 @@
-#ifndef _LINUX_LOCALLOCK_H
-#define _LINUX_LOCALLOCK_H
-
-#include <linux/spinlock.h>
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define LL_WARN(cond) WARN_ON(cond)
-#else
-# define LL_WARN(cond) do { } while (0)
-#endif
-
-/*
- * per cpu lock based substitute for local_irq_*()
- */
-struct local_irq_lock {
- spinlock_t lock;
- struct task_struct *owner;
- int nestcnt;
- unsigned long flags;
-};
-
-#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
- DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
- .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
-
-#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
- DECLARE_PER_CPU(struct local_irq_lock, lvar)
-
-#define local_irq_lock_init(lvar) \
- do { \
- int __cpu; \
- for_each_possible_cpu(__cpu) \
- spin_lock_init(&per_cpu(lvar, __cpu).lock); \
- } while (0)
-
-static inline void __local_lock(struct local_irq_lock *lv)
-{
- if (lv->owner != current) {
- spin_lock(&lv->lock);
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- }
- lv->nestcnt++;
-}
-
-#define local_lock(lvar) \
- do { __local_lock(&get_local_var(lvar)); } while (0)
-
-static inline int __local_trylock(struct local_irq_lock *lv)
-{
- if (lv->owner != current && spin_trylock(&lv->lock)) {
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- lv->nestcnt = 1;
- return 1;
- }
- return 0;
-}
-
-#define local_trylock(lvar) \
- ({ \
- int __locked; \
- __locked = __local_trylock(&get_local_var(lvar)); \
- if (!__locked) \
- put_local_var(lvar); \
- __locked; \
- })
-
-static inline void __local_unlock(struct local_irq_lock *lv)
-{
- LL_WARN(lv->nestcnt == 0);
- LL_WARN(lv->owner != current);
- if (--lv->nestcnt)
- return;
-
- lv->owner = NULL;
- spin_unlock(&lv->lock);
-}
-
-#define local_unlock(lvar) \
- do { \
- __local_unlock(&__get_cpu_var(lvar)); \
- put_local_var(lvar); \
- } while (0)
-
-static inline void __local_lock_irq(struct local_irq_lock *lv)
-{
- spin_lock_irqsave(&lv->lock, lv->flags);
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- lv->nestcnt = 1;
-}
-
-#define local_lock_irq(lvar) \
- do { __local_lock_irq(&get_local_var(lvar)); } while (0)
-
-#define local_lock_irq_on(lvar, cpu) \
- do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
-
-static inline void __local_unlock_irq(struct local_irq_lock *lv)
-{
- LL_WARN(!lv->nestcnt);
- LL_WARN(lv->owner != current);
- lv->owner = NULL;
- lv->nestcnt = 0;
- spin_unlock_irq(&lv->lock);
-}
-
-#define local_unlock_irq(lvar) \
- do { \
- __local_unlock_irq(&__get_cpu_var(lvar)); \
- put_local_var(lvar); \
- } while (0)
-
-#define local_unlock_irq_on(lvar, cpu) \
- do { \
- __local_unlock_irq(&per_cpu(lvar, cpu)); \
- } while (0)
-
-static inline int __local_lock_irqsave(struct local_irq_lock *lv)
-{
- if (lv->owner != current) {
- __local_lock_irq(lv);
- return 0;
- } else {
- lv->nestcnt++;
- return 1;
- }
-}
-
-#define local_lock_irqsave(lvar, _flags) \
- do { \
- if (__local_lock_irqsave(&get_local_var(lvar))) \
- put_local_var(lvar); \
- _flags = __get_cpu_var(lvar).flags; \
- } while (0)
-
-#define local_lock_irqsave_on(lvar, _flags, cpu) \
- do { \
- __local_lock_irqsave(&per_cpu(lvar, cpu)); \
- _flags = per_cpu(lvar, cpu).flags; \
- } while (0)
-
-static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
- unsigned long flags)
-{
- LL_WARN(!lv->nestcnt);
- LL_WARN(lv->owner != current);
- if (--lv->nestcnt)
- return 0;
-
- lv->owner = NULL;
- spin_unlock_irqrestore(&lv->lock, lv->flags);
- return 1;
-}
-
-#define local_unlock_irqrestore(lvar, flags) \
- do { \
- if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
- put_local_var(lvar); \
- } while (0)
-
-#define local_unlock_irqrestore_on(lvar, flags, cpu) \
- do { \
- __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
- } while (0)
-
-#define local_spin_trylock_irq(lvar, lock) \
- ({ \
- int __locked; \
- local_lock_irq(lvar); \
- __locked = spin_trylock(lock); \
- if (!__locked) \
- local_unlock_irq(lvar); \
- __locked; \
- })
-
-#define local_spin_lock_irq(lvar, lock) \
- do { \
- local_lock_irq(lvar); \
- spin_lock(lock); \
- } while (0)
-
-#define local_spin_unlock_irq(lvar, lock) \
- do { \
- spin_unlock(lock); \
- local_unlock_irq(lvar); \
- } while (0)
-
-#define local_spin_lock_irqsave(lvar, lock, flags) \
- do { \
- local_lock_irqsave(lvar, flags); \
- spin_lock(lock); \
- } while (0)
-
-#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- do { \
- spin_unlock(lock); \
- local_unlock_irqrestore(lvar, flags); \
- } while (0)
-
-#define get_locked_var(lvar, var) \
- (*({ \
- local_lock(lvar); \
- &__get_cpu_var(var); \
- }))
-
-#define put_locked_var(lvar, var) local_unlock(lvar)
-
-#define local_lock_cpu(lvar) \
- ({ \
- local_lock(lvar); \
- smp_processor_id(); \
- })
-
-#define local_unlock_cpu(lvar) local_unlock(lvar)
-
-#else /* PREEMPT_RT_BASE */
-
-#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
-#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
-
-static inline void local_irq_lock_init(int lvar) { }
-
-#define local_lock(lvar) preempt_disable()
-#define local_unlock(lvar) preempt_enable()
-#define local_lock_irq(lvar) local_irq_disable()
-#define local_unlock_irq(lvar) local_irq_enable()
-#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
-#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
-
-#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
-#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
-#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
-#define local_spin_lock_irqsave(lvar, lock, flags) \
- spin_lock_irqsave(lock, flags)
-#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- spin_unlock_irqrestore(lock, flags)
-
-#define get_locked_var(lvar, var) get_cpu_var(var)
-#define put_locked_var(lvar, var) put_cpu_var(var)
-
-#define local_lock_cpu(lvar) get_cpu()
-#define local_unlock_cpu(lvar) put_cpu()
-
-#endif
-
-#endif
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index acf4d31..4b117a3 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -735,7 +735,6 @@ struct rtsx_pcr {
unsigned int card_inserted;
unsigned int card_removed;
- unsigned int card_exist;
struct delayed_work carddet_work;
struct delayed_work idle_work;
@@ -800,7 +799,6 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
-int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card);
int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e3b3a15..66e2f7c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1259,59 +1259,27 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
-
#define __pte_lockptr(page) &((page)->ptl)
-
-static inline struct page *pte_lock_init(struct page *page)
-{
- spin_lock_init(__pte_lockptr(page));
- return page;
-}
-
+#define pte_lock_init(_page) do { \
+ spin_lock_init(__pte_lockptr(_page)); \
+} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
-
-#else /* !PREEMPT_RT_FULL */
-
-/*
- * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
- * page frame, hence it only has a pointer and we need to dynamically
- * allocate the lock when we allocate PTE-pages.
- *
- * This is an overall win, since only a small fraction of the pages
- * will be PTE pages under normal circumstances.
- */
-
-#define __pte_lockptr(page) ((page)->ptl)
-
-extern struct page *pte_lock_init(struct page *page);
-extern void pte_lock_deinit(struct page *page);
-
-#endif /* PREEMPT_RT_FULL */
-
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
-static inline struct page *pte_lock_init(struct page *page) { return page; }
+#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */
-static inline struct page *__pgtable_page_ctor(struct page *page)
+static inline void pgtable_page_ctor(struct page *page)
{
- page = pte_lock_init(page);
- if (page)
- inc_zone_page_state(page, NR_PAGETABLE);
- return page;
+ pte_lock_init(page);
+ inc_zone_page_state(page, NR_PAGETABLE);
}
-#define pgtable_page_ctor(page) \
-do { \
- page = __pgtable_page_ctor(page); \
-} while (0)
-
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
@@ -1655,8 +1623,6 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
-int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
-
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6270199..f8f5162 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,7 +11,6 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
-#include <linux/rcupdate.h>
#include <linux/uprobes.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -142,11 +141,7 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTLOCKS
-# ifndef CONFIG_PREEMPT_RT_FULL
spinlock_t ptl;
-# else
- spinlock_t *ptl;
-# endif
#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
struct page *first_page; /* Compound tail pages */
@@ -441,9 +436,6 @@ struct mm_struct {
int first_nid;
#endif
struct uprobes_state uprobes_state;
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head delayed_drop;
-#endif
};
/* first nid will either be a valid NID or one of these values */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 73005f9..d7029f4 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -47,8 +47,6 @@ struct mnt_namespace;
#define MNT_INTERNAL 0x4000
-#define MNT_LOCK_READONLY 0x400000
-
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ef52d9c..7ccb3c5 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -187,13 +187,6 @@ typedef enum {
* This happens with the Renesas AG-AND chips, possibly others.
*/
#define BBT_AUTO_REFRESH 0x00000080
-/*
- * Chip requires ready check on read (for auto-incremented sequential read).
- * True only for small page devices; large page devices do not support
- * autoincrement.
- */
-#define NAND_NEED_READRDY 0x00000100
-
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bdf1da2..9121595 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,17 +17,6 @@
#include <linux/atomic.h>
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/mutex_rt.h>
-#else
-
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -106,6 +95,13 @@ do { \
static inline void mutex_destroy(struct mutex *lock) {}
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -171,9 +167,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
-
-#endif /* !PREEMPT_RT_FULL */
-
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
deleted file mode 100644
index c38a44b..0000000
--- a/include/linux/mutex_rt.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef __LINUX_MUTEX_RT_H
-#define __LINUX_MUTEX_RT_H
-
-#ifndef __LINUX_MUTEX_H
-#error "Please include mutex.h"
-#endif
-
-#include <linux/rtmutex.h>
-
-/* FIXME: Just for __lockfunc */
-#include <linux/spinlock.h>
-
-struct mutex {
- struct rt_mutex lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define __MUTEX_INITIALIZER(mutexname) \
- { \
- .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
- __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
- }
-
-#define DEFINE_MUTEX(mutexname) \
- struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-
-extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
-extern void __lockfunc _mutex_lock(struct mutex *lock);
-extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
-extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
-extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
-extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
-extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
-extern int __lockfunc _mutex_trylock(struct mutex *lock);
-extern void __lockfunc _mutex_unlock(struct mutex *lock);
-
-#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
-#define mutex_lock(l) _mutex_lock(l)
-#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
-#define mutex_lock_killable(l) _mutex_lock_killable(l)
-#define mutex_trylock(l) _mutex_trylock(l)
-#define mutex_unlock(l) _mutex_unlock(l)
-#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
-# define mutex_lock_interruptible_nested(l, s) \
- _mutex_lock_interruptible_nested(l, s)
-# define mutex_lock_killable_nested(l, s) \
- _mutex_lock_killable_nested(l, s)
-
-# define mutex_lock_nest_lock(lock, nest_lock) \
-do { \
- typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
- _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
-} while (0)
-
-#else
-# define mutex_lock_nested(l, s) _mutex_lock(l)
-# define mutex_lock_interruptible_nested(l, s) \
- _mutex_lock_interruptible(l)
-# define mutex_lock_killable_nested(l, s) \
- _mutex_lock_killable(l)
-# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-#endif
-
-# define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(mutex)->lock); \
- __mutex_do_init((mutex), #mutex, &__key); \
-} while (0)
-
-# define __mutex_init(mutex, name, key) \
-do { \
- rt_mutex_init(&(mutex)->lock); \
- __mutex_do_init((mutex), name, key); \
-} while (0)
-
-#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0b58fd6..9ef07d0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -208,9 +208,9 @@ struct netdev_hw_addr {
#define NETDEV_HW_ADDR_T_SLAVE 3
#define NETDEV_HW_ADDR_T_UNICAST 4
#define NETDEV_HW_ADDR_T_MULTICAST 5
+ bool synced;
bool global_use;
int refcount;
- int synced;
struct rcu_head rcu_head;
};
@@ -1579,7 +1579,7 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t dev_base_lock; /* Device list lock */
-extern struct mutex devnet_rename_mutex;
+extern seqcount_t devnet_rename_seq; /* Device rename seq */
#define for_each_netdev(net, d) \
@@ -1783,7 +1783,6 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
- struct sk_buff_head tofree_queue;
};
static inline void input_queue_head_incr(struct softnet_data *sd)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 7d083af..dd49566 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,7 +3,6 @@
#include <linux/netdevice.h>
-#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -285,8 +284,6 @@ extern void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
-DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
-
/**
* xt_write_recseq_begin - start of a write section
*
@@ -301,9 +298,6 @@ static inline unsigned int xt_write_recseq_begin(void)
{
unsigned int addend;
- /* RT protection */
- local_lock(xt_write_lock);
-
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
@@ -334,7 +328,6 @@ static inline void xt_write_recseq_end(unsigned int addend)
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
- local_unlock(xt_write_lock);
}
/*
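
A brief sketch, not part of the patch, of how a netfilter writer brackets per-CPU counter updates once the RT local lock is gone: only the seqcount helpers remain, and writers run with bottom halves disabled. The surrounding function is hypothetical.

#include <linux/netfilter/x_tables.h>

static void example_update_counters(void)
{
        unsigned int addend;

        local_bh_disable();                     /* writers run BH-disabled */
        addend = xt_write_recseq_begin();
        /* ... bump per-CPU rule counters ... */
        xt_write_recseq_end(addend);
        local_bh_enable();
}
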
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 6bfd703..d65746e 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -42,7 +42,9 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed.
+ * often but notifier_blocks will seldom be removed. Also, SRCU notifier
+ * chains are slightly more difficult to use because they require special
+ * runtime initialization.
*/
struct notifier_block {
@@ -83,7 +85,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be cleaned up dynamically */
+/* srcu_notifier_heads must be initialized and cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -96,13 +98,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-
-#define SRCU_NOTIFIER_INIT(name, pcpu) \
- { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
- }
+/* srcu_notifier_heads cannot be initialized statically */
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -114,18 +110,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
-#define _SRCU_NOTIFIER_HEAD(name, mod) \
- static DEFINE_PER_CPU(struct srcu_struct_array, \
- name##_head_srcu_array); \
- mod struct srcu_notifier_head name = \
- SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
-
-#define SRCU_NOTIFIER_HEAD(name) \
- _SRCU_NOTIFIER_HEAD(name, )
-
-#define SRCU_NOTIFIER_HEAD_STATIC(name) \
- _SRCU_NOTIFIER_HEAD(name, static)
-
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
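
A short sketch, not part of the patch: with the static SRCU_NOTIFIER_HEAD() initializers removed, an srcu_notifier_head must be initialized at run time before use and cleaned up explicitly. example_chain and the init/exit functions are hypothetical.

#include <linux/init.h>
#include <linux/notifier.h>

static struct srcu_notifier_head example_chain;

static int __init example_init(void)
{
        srcu_init_notifier_head(&example_chain);
        return 0;
}

static void example_exit(void)
{
        srcu_cleanup_notifier_head(&example_chain);
}
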
diff --git a/include/linux/of.h b/include/linux/of.h
index bb35c42..5ebcc5c 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -92,7 +92,7 @@ static inline void of_node_put(struct device_node *node) { }
extern struct device_node *of_allnodes;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
-extern raw_spinlock_t devtree_lock;
+extern rwlock_t devtree_lock;
static inline bool of_have_populated_dt(void)
{
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index ca67e80..777a524 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -24,9 +24,6 @@ enum {
*/
struct page_cgroup {
unsigned long flags;
-#ifdef CONFIG_PREEMPT_RT_BASE
- spinlock_t pcg_lock;
-#endif
struct mem_cgroup *mem_cgroup;
};
@@ -77,20 +74,12 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
* Don't take this lock in IRQ context.
* This lock is for pc->mem_cgroup, USED, MIGRATION
*/
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(PCG_LOCK, &pc->flags);
-#else
- spin_lock(&pc->pcg_lock);
-#endif
}
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(PCG_LOCK, &pc->flags);
-#else
- spin_unlock(&pc->pcg_lock);
-#endif
}
#else /* CONFIG_MEMCG */
@@ -113,10 +102,6 @@ static inline void __init page_cgroup_init_flatmem(void)
{
}
-static inline void page_cgroup_lock_init(struct page_cgroup *pc)
-{
-}
-
#endif /* CONFIG_MEMCG */
#include <linux/swap.h>
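
An illustrative sketch only (assumes CONFIG_MEMCG; the caller is hypothetical): with the RT spinlock variant gone, pc->mem_cgroup updates are again serialized by the PCG_LOCK bit spinlock, which must not be taken from IRQ context.

#include <linux/page_cgroup.h>

static void example_set_memcg(struct page_cgroup *pc, struct mem_cgroup *memcg)
{
        lock_page_cgroup(pc);
        pc->mem_cgroup = memcg;
        unlock_page_cgroup(pc);
}
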
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 12b394f..cc88172 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,31 +48,6 @@
preempt_enable(); \
} while (0)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define get_local_var(var) get_cpu_var(var)
-# define put_local_var(var) put_cpu_var(var)
-# define get_local_ptr(var) get_cpu_ptr(var)
-# define put_local_ptr(var) put_cpu_ptr(var)
-#else
-# define get_local_var(var) (*({ \
- migrate_disable(); \
- &__get_cpu_var(var); }))
-
-# define put_local_var(var) do { \
- (void)&(var); \
- migrate_enable(); \
-} while (0)
-
-# define get_local_ptr(var) ({ \
- migrate_disable(); \
- this_cpu_ptr(var); })
-
-# define put_local_ptr(var) do { \
- (void)(var); \
- migrate_enable(); \
-} while (0)
-#endif
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
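
A minimal sketch, not part of the patch: with get_local_var()/put_local_var() removed, per-CPU data goes back to the preemption-disabling accessors. The per-CPU variable is hypothetical.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count(void)
{
        get_cpu_var(example_hits)++;    /* disables preemption */
        put_cpu_var(example_hits);      /* enables it again */
}
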
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a280650..6bfb2faa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -794,12 +794,6 @@ static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
#endif
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
-extern void perf_restore_debug_store(void);
-#else
-static inline void perf_restore_debug_store(void) { }
-#endif
-
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
/*
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 3b67343..2381c97 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,7 +2,6 @@
#define _LINUX_PID_H
#include <linux/rcupdate.h>
-#include <linux/atomic.h>
enum pid_type
{
diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h
index bb3cd58..24368a2 100644
--- a/include/linux/platform_data/cpsw.h
+++ b/include/linux/platform_data/cpsw.h
@@ -21,8 +21,6 @@ struct cpsw_slave_data {
char phy_id[MII_BUS_ID_SIZE];
int phy_if;
u8 mac_addr[ETH_ALEN];
- u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-
};
struct cpsw_platform_data {
@@ -30,15 +28,13 @@ struct cpsw_platform_data {
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
struct cpsw_slave_data *slave_data;
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 cpts_active_slave; /* time stamping slave */
u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
u32 rx_descs; /* Number of Rx Descriptios */
u32 mac_control; /* Mac control register */
- u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
- bool dual_emac; /* Enable Dual EMAC mode */
};
#endif /* __CPSW_H__ */
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 7db3eb9..0cc45ae 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -43,7 +43,7 @@ struct pps_source_info {
int event, void *data); /* PPS echo function */
struct module *owner;
- struct device *dev; /* Parent device for device_create */
+ struct device *dev;
};
struct pps_event_time {
@@ -69,7 +69,6 @@ struct pps_device {
wait_queue_head_t queue; /* PPS event queue */
unsigned int id; /* PPS source unique ID */
- void const *lookup_cookie; /* pps_lookup_dev only */
struct cdev cdev;
struct device *dev;
struct fasync_struct *async_queue; /* fasync method */
@@ -83,26 +82,16 @@ struct pps_device {
extern struct device_attribute pps_attrs[];
/*
- * Internal functions.
- *
- * These are not actually part of the exported API, but this is a
- * convenient header file to put them in.
- */
-
-extern int pps_register_cdev(struct pps_device *pps);
-extern void pps_unregister_cdev(struct pps_device *pps);
-
-/*
* Exported functions
*/
extern struct pps_device *pps_register_source(
struct pps_source_info *info, int default_params);
extern void pps_unregister_source(struct pps_device *pps);
+extern int pps_register_cdev(struct pps_device *pps);
+extern void pps_unregister_cdev(struct pps_device *pps);
extern void pps_event(struct pps_device *pps,
struct pps_event_time *ts, int event, void *data);
-/* Look up a pps device by magic cookie */
-struct pps_device *pps_lookup_dev(void const *cookie);
static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
struct timespec ts)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index a7f4212..5a710b9 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -23,38 +23,15 @@
#define preempt_count() (current_thread_info()->preempt_count)
-#ifdef CONFIG_PREEMPT_LAZY
-#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-#else
-#define add_preempt_lazy_count(val) do { } while (0)
-#define sub_preempt_lazy_count(val) do { } while (0)
-#define inc_preempt_lazy_count() do { } while (0)
-#define dec_preempt_lazy_count() do { } while (0)
-#define preempt_lazy_count() (0)
-#endif
-
#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void);
-# ifdef CONFIG_PREEMPT_LAZY
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \
- test_thread_flag(TIF_NEED_RESCHED_LAZY))) \
- preempt_schedule(); \
-} while (0)
-# else
#define preempt_check_resched() \
do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
-# endif
#else /* !CONFIG_PREEMPT */
@@ -71,36 +48,17 @@ do { \
barrier(); \
} while (0)
-#define preempt_lazy_disable() \
-do { \
- inc_preempt_lazy_count(); \
- barrier(); \
-} while (0)
-
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
-#ifndef CONFIG_PREEMPT_RT_BASE
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() barrier()
-#else
-# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() preempt_check_resched()
-#endif
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#define preempt_enable() \
do { \
- sched_preempt_enable_no_resched(); \
- barrier(); \
- preempt_check_resched(); \
-} while (0)
-
-#define preempt_lazy_enable() \
-do { \
- dec_preempt_lazy_count(); \
+ preempt_enable_no_resched(); \
barrier(); \
preempt_check_resched(); \
} while (0)
@@ -135,45 +93,17 @@ do { \
#else /* !CONFIG_PREEMPT_COUNT */
-/*
- * Even if we don't have any preemption, we need preempt disable/enable
- * to be barriers, so that we don't have things like get_user/put_user
- * that can cause faults and scheduling migrate into our preempt-protected
- * region.
- */
-#define preempt_disable() barrier()
-#define sched_preempt_enable_no_resched() barrier()
-#define preempt_enable_no_resched() barrier()
-#define preempt_enable() barrier()
+#define preempt_disable() do { } while (0)
+#define sched_preempt_enable_no_resched() do { } while (0)
+#define preempt_enable_no_resched() do { } while (0)
+#define preempt_enable() do { } while (0)
-#define preempt_disable_notrace() barrier()
-#define preempt_enable_no_resched_notrace() barrier()
-#define preempt_enable_notrace() barrier()
-#define preempt_check_resched_rt() barrier()
+#define preempt_disable_notrace() do { } while (0)
+#define preempt_enable_no_resched_notrace() do { } while (0)
+#define preempt_enable_notrace() do { } while (0)
#endif /* CONFIG_PREEMPT_COUNT */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define preempt_disable_rt() preempt_disable()
-# define preempt_enable_rt() preempt_enable()
-# define preempt_disable_nort() barrier()
-# define preempt_enable_nort() barrier()
-# ifdef CONFIG_SMP
- extern void migrate_disable(void);
- extern void migrate_enable(void);
-# else /* CONFIG_SMP */
-# define migrate_disable() barrier()
-# define migrate_enable() barrier()
-# endif /* CONFIG_SMP */
-#else
-# define preempt_disable_rt() barrier()
-# define preempt_enable_rt() barrier()
-# define preempt_disable_nort() preempt_disable()
-# define preempt_enable_nort() preempt_enable()
-# define migrate_disable() preempt_disable()
-# define migrate_enable() preempt_enable()
-#endif
-
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
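
A small sketch of the restored semantics (the caller is hypothetical): preempt_enable() is again preempt_enable_no_resched() plus preempt_check_resched(), so a plain critical section looks like this and may reschedule on the way out.

#include <linux/preempt.h>

static void example_stay_on_cpu(void)
{
        preempt_disable();
        /* ... work that must not migrate to another CPU ... */
        preempt_enable();       /* may call preempt_schedule() here */
}
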
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 812d102..9afc01e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -95,16 +95,8 @@ int no_printk(const char *fmt, ...)
return 0;
}
-#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
-void early_vprintk(const char *fmt, va_list ap);
-extern void printk_kill(void);
-#else
-static inline __printf(1, 2) __cold
-void early_printk(const char *s, ...) { }
-static inline void printk_kill(void) { }
-#endif
extern int printk_needs_cpu(int cpu);
extern void printk_tick(void);
@@ -140,6 +132,7 @@ extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
+
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 75d0176..1788909 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -68,18 +68,12 @@ struct pstore_info {
#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
-extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
#else
static inline int
pstore_register(struct pstore_info *psi)
{
return -ENODEV;
}
-static inline bool
-pstore_cannot_block_path(enum kmsg_dump_reason reason)
-{
- return false;
-}
#endif
#endif /*_LINUX_PSTORE_H*/
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d133711..58fdef12 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -405,7 +405,6 @@ struct quota_module_name {
#define INIT_QUOTA_MODULE_NAMES {\
{QFMT_VFS_OLD, "quota_v1"},\
{QFMT_VFS_V0, "quota_v2"},\
- {QFMT_VFS_V1, "quota_v2"},\
{0, NULL}}
#endif /* _QUOTA_ */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 7ddfbf9..ffc444c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -230,13 +230,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
-
-#ifndef CONFIG_PREEMPT_RT_FULL
int radix_tree_preload(gfp_t gfp_mask);
-#else
-static inline int radix_tree_preload(gfp_t gm) { return 0; }
-#endif
-
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
@@ -261,7 +255,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
- preempt_enable_nort();
+ preempt_enable();
}
/**
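
A usage sketch, not part of the patch: with the RT stub dropped, radix_tree_preload() really disables preemption on success, so insertions follow the usual preload/insert/preload_end pattern. The caller is hypothetical.

#include <linux/gfp.h>
#include <linux/radix-tree.h>

static int example_insert(struct radix_tree_root *root,
                          unsigned long index, void *item)
{
        int err;

        err = radix_tree_preload(GFP_KERNEL);
        if (err)
                return err;
        err = radix_tree_insert(root, index, item);
        radix_tree_preload_end();       /* preempt_enable() again */
        return err;
}
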
diff --git a/include/linux/random.h b/include/linux/random.h
index f975382..d984608 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -12,7 +12,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 33e1d2e..275aa3f1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -120,9 +120,6 @@ extern void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RT_FULL
-#define call_rcu_bh call_rcu
-#else
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -146,7 +143,6 @@ extern void call_rcu(struct rcu_head *head,
*/
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
-#endif
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -186,11 +182,6 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-#ifndef CONFIG_PREEMPT_RT_FULL
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-#else
-static inline int sched_rcu_preempt_depth(void) { return 0; }
-#endif
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -214,8 +205,6 @@ static inline int rcu_preempt_depth(void)
return 0;
}
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
@@ -370,14 +359,7 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline int rcu_read_lock_bh_held(void)
-{
- return rcu_read_lock_held();
-}
-#else
extern int rcu_read_lock_bh_held(void);
-#endif
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -825,14 +807,10 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
-#ifdef CONFIG_PREEMPT_RT_FULL
- rcu_read_lock();
-#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"rcu_read_lock_bh() used illegally while idle");
-#endif
}
/*
@@ -842,14 +820,10 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- rcu_read_unlock();
-#else
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
-#endif
local_bh_enable();
}
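
A reader-side sketch, not part of the patch (struct example_entry and the list are hypothetical): rcu_read_lock_bh()/rcu_read_unlock_bh() again pair local_bh_disable()/enable() with the lockdep annotations, so a BH read-side critical section looks like this.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_entry {
        struct list_head node;
        int id;
};

static bool example_id_present(struct list_head *head, int id)
{
        struct example_entry *e;
        bool found = false;

        rcu_read_lock_bh();
        list_for_each_entry_rcu(e, head, node) {
                if (e->id == id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock_bh();
        return found;
}
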
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index f1472a2..952b793 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,11 +45,7 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(cpu);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define synchronize_rcu_bh synchronize_rcu
-#else
extern void synchronize_rcu_bh(void);
-#endif
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
@@ -77,30 +73,20 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define rcu_barrier_bh rcu_barrier
-#else
extern void rcu_barrier_bh(void);
-#endif
extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
+extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
-#ifndef CONFIG_PREEMPT_RT_FULL
-extern void rcu_bh_force_quiescent_state(void);
-extern long rcu_batches_completed_bh(void);
-#else
-# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
-# define rcu_batches_completed_bh rcu_batches_completed
-#endif
-
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 5ebd0bb..de17134 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -14,7 +14,7 @@
#include <linux/linkage.h>
#include <linux/plist.h>
-#include <linux/spinlock_types_raw.h>
+#include <linux/spinlock_types.h>
extern int max_lock_depth; /* for sysctl */
@@ -29,10 +29,9 @@ struct rt_mutex {
raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
- int save_state;
#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *file;
- const char *name;
+ int save_state;
+ const char *name, *file;
int line;
void *magic;
#endif
@@ -57,39 +56,19 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
- do { \
- raw_spin_lock_init(&(mutex)->wait_lock); \
- __rt_mutex_init(mutex, #mutex); \
- } while (0)
-
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-
-# define rt_mutex_init(mutex) \
- do { \
- raw_spin_lock_init(&(mutex)->wait_lock); \
- __rt_mutex_init(mutex, #mutex); \
- } while (0)
-
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
-#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
, .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-
-
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
-
-#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
- { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- , .save_state = 1 }
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -111,7 +90,6 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
int detect_deadlock);
-extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
int detect_deadlock);
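
A minimal sketch, not part of the patch (the lock name is hypothetical): with the save_state initializers gone, an rt_mutex is declared and used in the plain form again.

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
        rt_mutex_lock(&example_lock);
        /* ... exclusive, priority-inheriting section ... */
        rt_mutex_unlock(&example_lock);
}
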
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
deleted file mode 100644
index 853ee36..0000000
--- a/include/linux/rwlock_rt.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef __LINUX_RWLOCK_RT_H
-#define __LINUX_RWLOCK_RT_H
-
-#ifndef __LINUX_SPINLOCK_H
-#error Do not include directly. Use spinlock.h
-#endif
-
-#define rwlock_init(rwl) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(rwl)->lock); \
- __rt_rwlock_init(rwl, #rwl, &__key); \
-} while (0)
-
-extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
-extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
-extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
-extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
-extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
-extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
-extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
-extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
-extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
-extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
-
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
-
-#define write_trylock_irqsave(lock, flags) \
- __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
-
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- migrate_disable(); \
- flags = rt_read_lock_irqsave(lock); \
- } while (0)
-
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- migrate_disable(); \
- flags = rt_write_lock_irqsave(lock); \
- } while (0)
-
-#define read_lock(lock) \
- do { \
- migrate_disable(); \
- rt_read_lock(lock); \
- } while (0)
-
-#define read_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- migrate_disable(); \
- rt_read_lock(lock); \
- } while (0)
-
-#define read_lock_irq(lock) read_lock(lock)
-
-#define write_lock(lock) \
- do { \
- migrate_disable(); \
- rt_write_lock(lock); \
- } while (0)
-
-#define write_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- migrate_disable(); \
- rt_write_lock(lock); \
- } while (0)
-
-#define write_lock_irq(lock) write_lock(lock)
-
-#define read_unlock(lock) \
- do { \
- rt_read_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#define read_unlock_bh(lock) \
- do { \
- rt_read_unlock(lock); \
- migrate_enable(); \
- local_bh_enable(); \
- } while (0)
-
-#define read_unlock_irq(lock) read_unlock(lock)
-
-#define write_unlock(lock) \
- do { \
- rt_write_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#define write_unlock_bh(lock) \
- do { \
- rt_write_unlock(lock); \
- migrate_enable(); \
- local_bh_enable(); \
- } while (0)
-
-#define write_unlock_irq(lock) write_unlock(lock)
-
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- rt_read_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- rt_write_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#endif
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index d0da966..cc0072e 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,10 +1,6 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
-#if !defined(__LINUX_SPINLOCK_TYPES_H)
-# error "Do not include directly, include spinlock_types.h"
-#endif
-
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
@@ -47,7 +43,6 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
-#define DEFINE_RWLOCK(name) \
- rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
deleted file mode 100644
index b138321..0000000
--- a/include/linux/rwlock_types_rt.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __LINUX_RWLOCK_TYPES_RT_H
-#define __LINUX_RWLOCK_TYPES_RT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-/*
- * rwlocks - rtmutex which allows single reader recursion
- */
-typedef struct {
- struct rt_mutex lock;
- int read_depth;
- unsigned int break_lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RW_LOCK_UNLOCKED(name) \
- { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
- RW_DEP_MAP_INIT(name) }
-
-#define DEFINE_RWLOCK(name) \
- rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
-
-#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f994bd3..8da67d6 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -16,10 +16,6 @@
#include <linux/atomic.h>
-#ifdef CONFIG_PREEMPT_RT_FULL
-#include <linux/rwsem_rt.h>
-#else /* PREEMPT_RT_FULL */
-
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -143,6 +139,4 @@ do { \
# define down_write_nested(sem, subclass) down_write(sem)
#endif
-#endif /* !PREEMPT_RT_FULL */
-
#endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
deleted file mode 100644
index e94d945..0000000
--- a/include/linux/rwsem_rt.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef _LINUX_RWSEM_RT_H
-#define _LINUX_RWSEM_RT_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Include rwsem.h"
-#endif
-
-/*
- * RW-semaphores are a spinlock plus a reader-depth count.
- *
- * Note that the semantics are different from the usual
- * Linux rw-sems, in PREEMPT_RT mode we do not allow
- * multiple readers to hold the lock at once, we only allow
- * a read-lock owner to read-lock recursively. This is
- * better for latency, makes the implementation inherently
- * fair and makes it simpler as well.
- */
-
-#include <linux/rtmutex.h>
-
-struct rw_semaphore {
- struct rt_mutex lock;
- int read_depth;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define __RWSEM_INITIALIZER(name) \
- { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
- RW_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(lockname) \
- struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
-
-extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
- struct lock_class_key *key);
-
-#define __rt_init_rwsem(sem, name, key) \
- do { \
- rt_mutex_init(&(sem)->lock); \
- __rt_rwsem_init((sem), (name), (key));\
- } while (0)
-
-#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
-
-# define rt_init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __rt_init_rwsem((sem), #sem, &__key); \
-} while (0)
-
-extern void rt_down_write(struct rw_semaphore *rwsem);
-extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
-extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
-extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
- struct lockdep_map *nest);
-extern void rt_down_read(struct rw_semaphore *rwsem);
-extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
-extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
-extern void rt_up_read(struct rw_semaphore *rwsem);
-extern void rt_up_write(struct rw_semaphore *rwsem);
-extern void rt_downgrade_write(struct rw_semaphore *rwsem);
-
-#define init_rwsem(sem) rt_init_rwsem(sem)
-#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
-
-static inline void down_read(struct rw_semaphore *sem)
-{
- rt_down_read(sem);
-}
-
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
- return rt_down_read_trylock(sem);
-}
-
-static inline void down_write(struct rw_semaphore *sem)
-{
- rt_down_write(sem);
-}
-
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
- return rt_down_write_trylock(sem);
-}
-
-static inline void up_read(struct rw_semaphore *sem)
-{
- rt_up_read(sem);
-}
-
-static inline void up_write(struct rw_semaphore *sem)
-{
- rt_up_write(sem);
-}
-
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
- rt_downgrade_write(sem);
-}
-
-static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
-{
- return rt_down_read_nested(sem, subclass);
-}
-
-static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
-{
- rt_down_write_nested(sem, subclass);
-}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void down_write_nest_lock(struct rw_semaphore *sem,
- struct rw_semaphore *nest_lock)
-{
- rt_down_write_nested_lock(sem, &nest_lock->dep_map);
-}
-
-#else
-
-static inline void down_write_nest_lock(struct rw_semaphore *sem,
- struct rw_semaphore *nest_lock)
-{
- rt_down_write_nested_lock(sem, NULL);
-}
-#endif
-#endif
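
A usage sketch, not part of the patch (names are hypothetical): with rwsem_rt.h removed, rw_semaphores are the generic ones again and permit several concurrent readers, while writers stay exclusive.

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void example_reader(void)
{
        down_read(&example_sem);
        /* ... multiple readers may run here concurrently ... */
        up_read(&example_sem);
}

static void example_writer(void)
{
        down_write(&example_sem);
        /* ... exclusive access ... */
        up_write(&example_sem);
}
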
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bcaa53f..d2112477 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -23,7 +23,6 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <asm/kmap_types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
@@ -52,7 +51,6 @@ struct sched_param {
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
-#include <linux/hardirq.h>
#include <asm/processor.h>
@@ -165,10 +163,9 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
-#define TASK_PARKED 512
-#define TASK_STATE_MAX 1024
+#define TASK_STATE_MAX 512
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1064,7 +1061,6 @@ struct sched_domain;
#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x04 /* internal use, task got migrated */
-#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
#define ENQUEUE_WAKEUP 1
#define ENQUEUE_HEAD 2
@@ -1241,7 +1237,6 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -1281,12 +1276,6 @@ struct task_struct {
#endif
unsigned int policy;
-#ifdef CONFIG_PREEMPT_RT_FULL
- int migrate_disable;
-# ifdef CONFIG_SCHED_DEBUG
- int migrate_disable_atomic;
-# endif
-#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -1387,9 +1376,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct task_struct *posix_timer_list;
-#endif
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
@@ -1421,15 +1407,10 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* TODO: move me into ->restart_block ? */
- struct siginfo forced_info;
-#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
@@ -1466,9 +1447,6 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- int pagefault_disabled;
-#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1611,12 +1589,6 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- u64 preempt_timestamp_hist;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- long timer_offset;
-#endif
-#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
struct memcg_batch_info {
@@ -1633,22 +1605,11 @@ struct task_struct {
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head put_rcu;
- int softirq_nestcnt;
- unsigned int softirqs_raised;
-#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
- int kmap_idx;
- pte_t kmap_pte[KM_TYPE_NR];
-# endif
-#endif
-#ifdef CONFIG_DEBUG_PREEMPT
- unsigned long preempt_disable_ip;
-#endif
};
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
@@ -1661,17 +1622,6 @@ static inline void set_numabalancing_state(bool enabled)
}
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
-#else
-static inline bool cur_pf_disabled(void) { return false; }
-#endif
-
-static inline bool pagefault_disabled(void)
-{
- return in_atomic() || cur_pf_disabled();
-}
-
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -1834,15 +1784,6 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __put_task_struct_cb(struct rcu_head *rhp);
-
-static inline void put_task_struct(struct task_struct *t)
-{
- if (atomic_dec_and_test(&t->usage))
- call_rcu(&t->put_rcu, __put_task_struct_cb);
-}
-#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1850,7 +1791,6 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
-#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
@@ -1858,7 +1798,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
-#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -1876,7 +1815,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
-#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
@@ -1983,10 +1921,6 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
-int migrate_me(void);
-void tell_sched_cpu_down_begin(int cpu);
-void tell_sched_cpu_down_done(int cpu);
-
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -1999,9 +1933,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
-static inline int migrate_me(void) { return 0; }
-static inline void tell_sched_cpu_down_begin(int cpu) { }
-static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ
@@ -2176,7 +2107,6 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -2187,10 +2117,6 @@ static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
-{
- return 0;
-}
# define rt_mutex_adjust_pi(p) do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -2282,7 +2208,6 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_lock_sleeper(struct task_struct * tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2387,24 +2312,12 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
-
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __mmdrop_delayed(struct rcu_head *rhp);
-static inline void mmdrop_delayed(struct mm_struct *mm)
-{
- if (atomic_dec_and_test(&mm->mm_count))
- call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-}
-#else
-# define mmdrop_delayed(mm) mmdrop(mm)
-#endif
-
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
@@ -2572,18 +2485,27 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
*
* Lock the threadgroup @tsk belongs to. No new task is allowed to enter
* and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid. This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
+ * perform exec. This is useful for cases where the threadgroup needs to
+ * stay stable across blockable operations.
*
* fork and exit paths explicitly call threadgroup_change_{begin|end}() for
* synchronization. While held, no new task will be added to threadgroup
* and no existing live task will have its PF_EXITING set.
*
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
+ * During exec, a task goes and puts its thread group through unusual
+ * changes. After de-threading, exclusive access is assumed to resources
+ * which are usually shared by tasks in the same group - e.g. sighand may
+ * be replaced with a new one. Also, the exec'ing task takes over group
+ * leader role including its pid. Exclude these changes while locked by
+ * grabbing cred_guard_mutex which is used to synchronize exec path.
*/
static inline void threadgroup_lock(struct task_struct *tsk)
{
+ /*
+ * exec uses exit for de-threading nesting group_rwsem inside
+ * cred_guard_mutex. Grab cred_guard_mutex first.
+ */
+ mutex_lock(&tsk->signal->cred_guard_mutex);
down_write(&tsk->signal->group_rwsem);
}
@@ -2596,6 +2518,7 @@ static inline void threadgroup_lock(struct task_struct *tsk)
static inline void threadgroup_unlock(struct task_struct *tsk)
{
up_write(&tsk->signal->group_rwsem);
+ mutex_unlock(&tsk->signal->cred_guard_mutex);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
@@ -2687,52 +2610,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-#ifdef CONFIG_PREEMPT_LAZY
-static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-}
-
-static inline int need_resched_lazy(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED) ||
- test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-#else
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-static inline int need_resched_lazy(void) { return 0; }
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-#endif
-
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
@@ -2764,6 +2641,11 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
+static inline int need_resched(void)
+{
+ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+}
+
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -2780,7 +2662,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
@@ -2791,16 +2673,12 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
-#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
-#else
-# define cond_resched_softirq() cond_resched()
-#endif
/*
* Does a critical section need to be broken due to another
@@ -2872,26 +2750,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-static inline int __migrate_disabled(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- return p->migrate_disable;
-#else
- return 0;
-#endif
-}
-
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (p->migrate_disable)
- return cpumask_of(task_cpu(p));
-#endif
-
- return &p->cpus_allowed;
-}
-
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
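
A small sketch, not part of the patch (the loop body is hypothetical): need_resched() is again a single TIF_NEED_RESCHED test, so a long-running loop can poll it and yield voluntarily.

#include <linux/sched.h>

static void example_long_loop(unsigned long iterations)
{
        unsigned long i;

        for (i = 0; i < iterations; i++) {
                /* ... one bounded unit of work ... */
                if (need_resched())
                        cond_resched();
        }
}
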
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 939ea1a..600060e2 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -30,12 +30,92 @@
#include <linux/preempt.h>
#include <asm/processor.h>
+typedef struct {
+ unsigned sequence;
+ spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems. We think these are
+ * OK now. Be cautious.
+ */
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+
+#define seqlock_init(x) \
+ do { \
+ (x)->sequence = 0; \
+ spin_lock_init(&(x)->lock); \
+ } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/* Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+ ++sl->sequence;
+ smp_wmb();
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+ smp_wmb();
+ sl->sequence++;
+ spin_unlock(&sl->lock);
+}
+
+static inline int write_tryseqlock(seqlock_t *sl)
+{
+ int ret = spin_trylock(&sl->lock);
+
+ if (ret) {
+ ++sl->sequence;
+ smp_wmb();
+ }
+ return ret;
+}
+
+/* Start of read calculation -- fetch last complete writer token */
+static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+ unsigned ret;
+
+repeat:
+ ret = ACCESS_ONCE(sl->sequence);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+ smp_rmb();
+
+ return ret;
+}
+
+/*
+ * Test if reader processed invalid data.
+ *
+ * If sequence value changed then writer changed data while in section.
+ */
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
+{
+ smp_rmb();
+
+ return unlikely(sl->sequence != start);
+}
+
+
/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
* updating starting before the write_seqcountbeqin() and ending
* after the write_seqcount_end().
*/
+
typedef struct seqcount {
unsigned sequence;
} seqcount_t;
@@ -138,6 +218,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
+
return __read_seqcount_retry(s, start);
}
@@ -146,30 +227,18 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void __write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- preempt_disable_rt();
- __write_seqcount_begin(s);
-}
-
-static inline void __write_seqcount_end(seqcount_t *s)
+static inline void write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
-static inline void write_seqcount_end(seqcount_t *s)
-{
- __write_seqcount_end(s);
- preempt_enable_rt();
-}
-
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
@@ -183,124 +252,31 @@ static inline void write_seqcount_barrier(seqcount_t *s)
s->sequence+=2;
}
-typedef struct {
- struct seqcount seqcount;
- spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
- { \
- .seqcount = SEQCNT_ZERO, \
- .lock = __SPIN_LOCK_UNLOCKED(lockname) \
- }
-
-#define seqlock_init(x) \
- do { \
- seqcount_init(&(x)->seqcount); \
- spin_lock_init(&(x)->lock); \
- } while (0)
-
-#define DEFINE_SEQLOCK(x) \
- seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/*
- * Read side functions for starting and finalizing a read side section.
- */
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline unsigned read_seqbegin(const seqlock_t *sl)
-{
- return read_seqcount_begin(&sl->seqcount);
-}
-#else
-/*
- * Starvation safe read side for RT
- */
-static inline unsigned read_seqbegin(seqlock_t *sl)
-{
- unsigned ret;
-
-repeat:
- ret = ACCESS_ONCE(sl->seqcount.sequence);
- if (unlikely(ret & 1)) {
- /*
- * Take the lock and let the writer proceed (i.e. evtl
- * boost it), otherwise we could loop here forever.
- */
- spin_lock(&sl->lock);
- spin_unlock(&sl->lock);
- goto repeat;
- }
- return ret;
-}
-#endif
-
-static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
-{
- return read_seqcount_retry(&sl->seqcount, start);
-}
-
/*
- * Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+ * Possible sw/hw IRQ protected versions of the interfaces.
*/
-static inline void write_seqlock(seqlock_t *sl)
-{
- spin_lock(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
- __write_seqcount_end(&sl->seqcount);
- spin_unlock(&sl->lock);
-}
-
-static inline void write_seqlock_bh(seqlock_t *sl)
-{
- spin_lock_bh(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
-}
-
-static inline void write_sequnlock_bh(seqlock_t *sl)
-{
- __write_seqcount_end(&sl->seqcount);
- spin_unlock_bh(&sl->lock);
-}
-
-static inline void write_seqlock_irq(seqlock_t *sl)
-{
- spin_lock_irq(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
-}
-
-static inline void write_sequnlock_irq(seqlock_t *sl)
-{
- __write_seqcount_end(&sl->seqcount);
- spin_unlock_irq(&sl->lock);
-}
-
-static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sl->lock, flags);
- __write_seqcount_begin(&sl->seqcount);
- return flags;
-}
-
#define write_seqlock_irqsave(lock, flags) \
- do { flags = __write_seqlock_irqsave(lock); } while (0)
-
-static inline void
-write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
-{
- __write_seqcount_end(&sl->seqcount);
- spin_unlock_irqrestore(&sl->lock, flags);
-}
+ do { local_irq_save(flags); write_seqlock(lock); } while (0)
+#define write_seqlock_irq(lock) \
+ do { local_irq_disable(); write_seqlock(lock); } while (0)
+#define write_seqlock_bh(lock) \
+ do { local_bh_disable(); write_seqlock(lock); } while (0)
+
+#define write_sequnlock_irqrestore(lock, flags) \
+ do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
+#define write_sequnlock_irq(lock) \
+ do { write_sequnlock(lock); local_irq_enable(); } while(0)
+#define write_sequnlock_bh(lock) \
+ do { write_sequnlock(lock); local_bh_enable(); } while(0)
+
+#define read_seqbegin_irqsave(lock, flags) \
+ ({ local_irq_save(flags); read_seqbegin(lock); })
+
+#define read_seqretry_irqrestore(lock, iv, flags) \
+ ({ \
+ int ret = read_seqretry(lock, iv); \
+ local_irq_restore(flags); \
+ ret; \
+ })
#endif /* __LINUX_SEQLOCK_H */
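
A reader/writer sketch, not part of the patch (the protected value is hypothetical): the restored seqlock_t readers spin on the sequence word and retry whenever a writer was active in between.

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(example_seqlock);
static u64 example_value;

static u64 example_read(void)
{
        unsigned seq;
        u64 v;

        do {
                seq = read_seqbegin(&example_seqlock);
                v = example_value;
        } while (read_seqretry(&example_seqlock, seq));
        return v;
}

static void example_write(u64 v)
{
        write_seqlock(&example_seqlock);
        example_value = v;
        write_sequnlock(&example_seqlock);
}
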
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 8942895..0a89ffc 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -226,7 +226,6 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
-extern void flush_task_sigqueue(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3da99c9b..320e976 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -132,7 +132,6 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
- raw_spinlock_t raw_lock;
};
struct sk_buff;
@@ -492,7 +491,7 @@ struct sk_buff {
union {
__u32 mark;
__u32 dropcount;
- __u32 reserved_tailroom;
+ __u32 avail_size;
};
sk_buff_data_t inner_transport_header;
@@ -1009,12 +1008,6 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
-static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
-{
- raw_spin_lock_init(&list->raw_lock);
- __skb_queue_head_init(list);
-}
-
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
@@ -1276,13 +1269,11 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
* do not lose pfmemalloc information as the pages would not be
* allocated using __GFP_MEMALLOC.
*/
+ if (page->pfmemalloc && !page->mapping)
+ skb->pfmemalloc = true;
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);
-
- page = compound_head(page);
- if (page->pfmemalloc && !page->mapping)
- skb->pfmemalloc = true;
}
/**
@@ -1437,10 +1428,7 @@ static inline int skb_tailroom(const struct sk_buff *skb)
*/
static inline int skb_availroom(const struct sk_buff *skb)
{
- if (skb_is_nonlinear(skb))
- return 0;
-
- return skb->end - skb->tail - skb->reserved_tailroom;
+ return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
}
/**
@@ -2604,13 +2592,6 @@ static inline void nf_reset(struct sk_buff *skb)
#endif
}
-static inline void nf_reset_trace(struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- skb->nf_trace = 0;
-#endif
-}
-
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a58ad34..9db4825 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -54,7 +54,7 @@ struct kmem_cache_cpu {
};
struct kmem_cache_node {
- raw_spinlock_t list_lock; /* Protect partial list and nr_partial */
+ spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9e3255b..dd6f06b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -218,9 +218,6 @@ static inline void kick_all_cpus_sync(void) { }
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
-#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
-#define put_cpu_light() migrate_enable()
-
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0c11a7c..7d537ce 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -254,11 +254,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/rwlock_rt.h>
-#else
-# include <linux/rwlock.h>
-#endif
+#include <linux/rwlock.h>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -269,10 +265,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/spinlock_rt.h>
-#else /* PREEMPT_RT_FULL */
-
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -402,6 +394,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-#endif /* !PREEMPT_RT_FULL */
-
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 3f68f50..51df117 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -191,8 +191,6 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-# include <linux/rwlock_api_smp.h>
-#endif
+#include <linux/rwlock_api_smp.h>
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
deleted file mode 100644
index 0618387..0000000
--- a/include/linux/spinlock_rt.h
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef __LINUX_SPINLOCK_RT_H
-#define __LINUX_SPINLOCK_RT_H
-
-#ifndef __LINUX_SPINLOCK_H
-#error Do not include directly. Use spinlock.h
-#endif
-
-#include <linux/bug.h>
-
-extern void
-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
-
-#define spin_lock_init(slock) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key); \
-} while (0)
-
-extern void __lockfunc rt_spin_lock(spinlock_t *lock);
-extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
-extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
-extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
-extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
-extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
-extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
-extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
-extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
-
-/*
- * lockdep-less calls, for derived types like rwlock:
- * (for trylock they can use rt_mutex_trylock() directly.
- */
-extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
-extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
-
-#define spin_lock_local(lock) rt_spin_lock(lock)
-#define spin_unlock_local(lock) rt_spin_unlock(lock)
-
-#define spin_lock(lock) \
- do { \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-#define spin_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-#define spin_lock_irq(lock) spin_lock(lock)
-
-#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
-
-#define spin_trylock(lock) \
-({ \
- int __locked; \
- migrate_disable(); \
- __locked = spin_do_trylock(lock); \
- if (!__locked) \
- migrate_enable(); \
- __locked; \
-})
-
-#ifdef CONFIG_LOCKDEP
-# define spin_lock_nested(lock, subclass) \
- do { \
- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-
-# define spin_lock_irqsave_nested(lock, flags, subclass) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-#else
-# define spin_lock_nested(lock, subclass) spin_lock(lock)
-
-# define spin_lock_irqsave_nested(lock, flags, subclass) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- spin_lock(lock); \
- } while (0)
-#endif
-
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- spin_lock(lock); \
- } while (0)
-
-static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
-{
- unsigned long flags = 0;
-#ifdef CONFIG_TRACE_IRQFLAGS
- flags = rt_spin_lock_trace_flags(lock);
-#else
- spin_lock(lock); /* lock_local */
-#endif
- return flags;
-}
-
-/* FIXME: we need rt_spin_lock_nest_lock */
-#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
-
-#define spin_unlock(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#define spin_unlock_bh(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- local_bh_enable(); \
- } while (0)
-
-#define spin_unlock_irq(lock) spin_unlock(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- spin_unlock(lock); \
- } while (0)
-
-#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) spin_trylock(lock)
-
-#define spin_trylock_irqsave(lock, flags) \
- rt_spin_trylock_irqsave(lock, &(flags))
-
-#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
-
-#ifdef CONFIG_GENERIC_LOCKBREAK
-# define spin_is_contended(lock) ((lock)->break_lock)
-#else
-# define spin_is_contended(lock) (((void)(lock), 0))
-#endif
-
-static inline int spin_can_lock(spinlock_t *lock)
-{
- return !rt_mutex_is_locked(&lock->lock);
-}
-
-static inline int spin_is_locked(spinlock_t *lock)
-{
- return rt_mutex_is_locked(&lock->lock);
-}
-
-static inline void assert_spin_locked(spinlock_t *lock)
-{
- BUG_ON(!spin_is_locked(lock));
-}
-
-#define atomic_dec_and_lock(atomic, lock) \
- atomic_dec_and_spin_lock(atomic, lock)
-
-#endif
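[Editor's note] The deleted spinlock_rt.h above shows the PREEMPT_RT_FULL mapping this revert removes: spin_lock() expanded to migrate_disable() plus an rt_mutex acquisition, so a "spinlock" section could be preempted and even sleep. A hedged illustration of an ordinary critical section, with the removed expansion described in comments (example_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_counter;

static void example_update(void)
{
        /* Under the deleted RT header, spin_lock(&example_lock) became:
         *         migrate_disable();
         *         rt_spin_lock(&example_lock);
         * keeping the task on its CPU but allowing it to sleep while
         * "holding" the lock.  After the revert this is the ordinary
         * non-sleeping raw-spinlock-backed spin_lock() again.
         */
        spin_lock(&example_lock);
        example_counter++;
        spin_unlock(&example_lock);
}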
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 10bac71..73548eb 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,15 +9,80 @@
* Released under the General Public License (GPL).
*/
-#include <linux/spinlock_types_raw.h>
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
-#ifndef CONFIG_PREEMPT_RT_FULL
-# include <linux/spinlock_types_nort.h>
-# include <linux/rwlock_types.h>
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# include <linux/rtmutex.h>
-# include <linux/spinlock_types_rt.h>
-# include <linux/rwlock_types_rt.h>
+# define SPIN_DEBUG_INIT(lockname)
#endif
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
+#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
+
#endif /* __LINUX_SPINLOCK_TYPES_H */
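[Editor's note] The restored spinlock_types.h above brings back the non-RT layout in which spinlock_t simply wraps raw_spinlock_t. A brief sketch of the two initialisation styles those macros support (lock and structure names are hypothetical):

#include <linux/spinlock.h>

/* Build-time initialisation via the restored DEFINE_* macros. */
static DEFINE_SPINLOCK(example_lock);
static DEFINE_RAW_SPINLOCK(example_raw_lock);

/* Run-time initialisation for a lock embedded in a dynamic structure. */
struct example_dev {
        spinlock_t lock;
};

static void example_dev_setup(struct example_dev *dev)
{
        spin_lock_init(&dev->lock);
}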
diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
deleted file mode 100644
index f1dac1f..0000000
--- a/include/linux/spinlock_types_nort.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
-#define __LINUX_SPINLOCK_TYPES_NORT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-/*
- * The non RT version maps spinlocks to raw_spinlocks
- */
-typedef struct spinlock {
- union {
- struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- struct {
- u8 __padding[LOCK_PADSIZE];
- struct lockdep_map dep_map;
- };
-#endif
- };
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#endif
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
deleted file mode 100644
index edffc4d..0000000
--- a/include/linux/spinlock_types_raw.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-#define __LINUX_SPINLOCK_TYPES_RAW_H
-
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-#endif
diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
deleted file mode 100644
index 9fd4319..0000000
--- a/include/linux/spinlock_types_rt.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_RT_H
-#define __LINUX_SPINLOCK_TYPES_RT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-#include <linux/cache.h>
-
-/*
- * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
- */
-typedef struct spinlock {
- struct rt_mutex lock;
- unsigned int break_lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __RT_SPIN_INITIALIZER(name) \
- { \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- .save_state = 1, \
- .file = __FILE__, \
- .line = __LINE__ , \
- }
-#else
-# define __RT_SPIN_INITIALIZER(name) \
- { \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- .save_state = 1, \
- }
-#endif
-
-/*
-.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
-*/
-
-#define __SPIN_LOCK_UNLOCKED(name) \
- { .lock = __RT_SPIN_INITIALIZER(name.lock), \
- SPIN_DEP_MAP_INIT(name) }
-
-#define __DEFINE_SPINLOCK(name) \
- spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
-
-#define DEFINE_SPINLOCK(name) \
- spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
-
-#endif
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index e2369c1..a26e2fb 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,10 +16,7 @@
* In the debug case, 1 means unlocked, 0 means locked. (the values
* are inverted, to catch initialization bugs)
*
- * No atomicity anywhere, we are on UP. However, we still need
- * the compiler barriers, because we do not want the compiler to
- * move potentially faulting instructions (notably user accesses)
- * into the locked sequence, resulting in non-atomic execution.
+ * No atomicity anywhere, we are on UP.
*/
#ifdef CONFIG_DEBUG_SPINLOCK
@@ -28,7 +25,6 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
- barrier();
}
static inline void
@@ -36,7 +32,6 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
- barrier();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -44,34 +39,32 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
char oldval = lock->slock;
lock->slock = 0;
- barrier();
return oldval > 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- barrier();
lock->slock = 1;
}
/*
* Read-write spinlocks. No debug version.
*/
-#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
#define arch_spin_is_contended(lock) (((void)(lock), 0))
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index fe9efd4..6eb691b 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
void process_srcu(struct work_struct *work);
-#define __SRCU_STRUCT_INIT(name, pcpu_name) \
+#define __SRCU_STRUCT_INIT(name) \
{ \
.completed = -300, \
- .per_cpu_ref = &pcpu_name, \
+ .per_cpu_ref = &name##_srcu_array, \
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
@@ -102,13 +102,13 @@ void process_srcu(struct work_struct *work);
* define and init a srcu struct at build time.
 * don't call init_srcu_struct() nor cleanup_srcu_struct() on it.
*/
-#define _DEFINE_SRCU(name, mod) \
+#define DEFINE_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- mod struct srcu_struct name = \
- __SRCU_STRUCT_INIT(name, name##_srcu_array);
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-#define DEFINE_SRCU(name) _DEFINE_SRCU(name, )
-#define DEFINE_STATIC_SRCU(name) _DEFINE_SRCU(name, static)
+#define DEFINE_STATIC_SRCU(name) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
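[Editor's note] Both DEFINE_SRCU() forms restored above expand to a per-CPU name##_srcu_array plus a struct srcu_struct. A minimal hedged usage sketch of a statically defined SRCU domain (example_* reader/updater are hypothetical):

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

DEFINE_STATIC_SRCU(example_srcu);
static int __rcu *example_ptr;

static int example_read(void)
{
        int idx, val = 0;
        int *p;

        idx = srcu_read_lock(&example_srcu);
        p = srcu_dereference(example_ptr, &example_srcu);
        if (p)
                val = *p;
        srcu_read_unlock(&example_srcu, idx);
        return val;
}

static void example_retire(int *old)
{
        /* Wait for all pre-existing SRCU readers before freeing. */
        synchronize_srcu(&example_srcu);
        kfree(old);
}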
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 6fcfe99..9e492be 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -219,7 +219,6 @@
#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */
#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16
-#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400
#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
@@ -668,6 +667,5 @@ enum ssb_pmu_ldo_volt_id {
void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
enum ssb_pmu_ldo_volt_id id, u32 voltage);
void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
-void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
#endif /* LINUX_SSB_CHIPCO_H_ */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b15655f..14a8ff2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,7 +25,6 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 90a8dfa..fe82022 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -44,7 +44,7 @@
/* Adding event notification support elements */
#define THERMAL_GENL_FAMILY_NAME "thermal_event"
#define THERMAL_GENL_VERSION 0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group"
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 2adf9c3..e1d558e 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -20,8 +20,6 @@ struct timekeeper {
u32 shift;
/* Number of clock cycles in one NTP interval. */
cycle_t cycle_interval;
- /* Last cycle value (also stored in clock->cycle_last) */
- cycle_t cycle_last;
/* Number of clock shifted nano seconds in one NTP interval. */
u64 xtime_interval;
/* shifted nano seconds left over when rounding cycle_interval */
@@ -64,6 +62,8 @@ struct timekeeper {
ktime_t offs_boot;
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;
+ /* Seqlock for all timekeeper values */
+ seqlock_t lock;
};
static inline struct timespec tk_xtime(struct timekeeper *tk)
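[Editor's note] The hunk above puts the seqlock back inside struct timekeeper (the stable/RT series had replaced it). A hedged sketch of the seqlock read loop such a field supports; the accessor below is illustrative, not an exact v3.8 helper:

#include <linux/timekeeper_internal.h>
#include <linux/seqlock.h>
#include <linux/time.h>

/* Hypothetical lockless read of seqlock-protected timekeeper fields. */
static struct timespec example_read_raw_time(struct timekeeper *tk)
{
        struct timespec ts;
        unsigned int seq;

        do {
                seq = read_seqbegin(&tk->lock);
                ts  = tk->raw_time;
        } while (read_seqretry(&tk->lock, seq));

        return ts;
}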
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 5fcd72c..8c5a197 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 44b3751..5ca0951 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,38 @@
/*
* These routines enable/disable the pagefault handler in that
- * it will not take any MM locks and go straight to the fixup table.
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
*/
-static inline void raw_pagefault_disable(void)
+static inline void pagefault_disable(void)
{
inc_preempt_count();
+ /*
+ * make sure to have issued the store before a pagefault
+ * can hit.
+ */
barrier();
}
-static inline void raw_pagefault_enable(void)
+static inline void pagefault_enable(void)
{
+ /*
+ * make sure to issue those last loads/stores before enabling
+ * the pagefault handler again.
+ */
barrier();
dec_preempt_count();
+ /*
+ * make sure we do..
+ */
barrier();
preempt_check_resched();
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline void pagefault_disable(void)
-{
- raw_pagefault_disable();
-}
-
-static inline void pagefault_enable(void)
-{
- raw_pagefault_enable();
-}
-#else
-extern void pagefault_disable(void);
-extern void pagefault_enable(void);
-#endif
-
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -76,9 +77,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
mm_segment_t old_fs = get_fs(); \
\
set_fs(KERNEL_DS); \
- raw_pagefault_disable(); \
+ pagefault_disable(); \
ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
- raw_pagefault_enable(); \
+ pagefault_enable(); \
set_fs(old_fs); \
ret; \
})
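[Editor's note] The restored comment above explains that pagefault_disable()/pagefault_enable() currently piggy-back on the preempt count so the fault handler takes the exception-fixup path instead of sleeping. A small hedged sketch of the usual pattern around an in-atomic user copy (example_peek_user is a hypothetical caller):

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical: read a u32 from user space without ever sleeping. */
static int example_peek_user(u32 __user *uaddr, u32 *val)
{
        int ret;

        pagefault_disable();
        /* Any fault now goes straight to the fixup table. */
        ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}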
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index ef03e33..4f628a6 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
-#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index cc25b70..3b8f9d4 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -127,7 +127,6 @@ struct cdc_ncm_ctx {
u16 connected;
};
-extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1819b59..ef9be7e 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -66,7 +66,6 @@
* port.
* @flags: usb serial port flags
* @write_wait: a wait_queue_head_t used by the port.
- * @delta_msr_wait: modem-status-change wait queue
* @work: work queue entry for the line discipline waking up.
* @throttled: nonzero if the read urb is inactive to throttle the device
* @throttle_req: nonzero if the tty wants to throttle us
@@ -113,7 +112,6 @@ struct usb_serial_port {
unsigned long flags;
wait_queue_head_t write_wait;
- wait_queue_head_t delta_msr_wait;
struct work_struct work;
char throttled;
char throttle_req;
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 5209cfe..b9bd2e6 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -26,8 +26,6 @@ struct user_namespace {
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
- bool may_mount_sysfs;
- bool may_mount_proc;
};
extern struct user_namespace init_user_ns;
@@ -84,6 +82,4 @@ static inline void put_user_ns(struct user_namespace *ns)
#endif
-void update_mnt_policy(struct user_namespace *userns);
-
#endif /* _LINUX_USER_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 839806b..a13291f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,9 +29,7 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- preempt_disable_rt();
__this_cpu_inc(vm_event_states.event[item]);
- preempt_enable_rt();
}
static inline void count_vm_event(enum vm_event_item item)
@@ -41,9 +39,7 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- preempt_disable_rt();
__this_cpu_add(vm_event_states.event[item], delta);
- preempt_enable_rt();
}
static inline void count_vm_events(enum vm_event_item item, long delta)
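[Editor's note] With the RT preempt_disable_rt()/preempt_enable_rt() pair removed above, __count_vm_event() is again a bare __this_cpu_inc(), so callers must already be in a per-CPU-safe context; count_vm_event() stays the variant that is safe anywhere. A hedged illustrative caller (example_account_fault is hypothetical; PGFAULT/PGMAJFAULT are real vm_event_item values):

#include <linux/vmstat.h>
#include <linux/preempt.h>

static void example_account_fault(void)
{
        /* Safe anywhere: handles preemption internally. */
        count_vm_event(PGFAULT);

        /* The double-underscore form needs the caller to hold the CPU. */
        preempt_disable();
        __count_vm_event(PGMAJFAULT);
        preempt_enable();
}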
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index e8d6571..50ae7d0 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -47,7 +47,6 @@ int con_set_cmap(unsigned char __user *cmap);
int con_get_cmap(unsigned char __user *cmap);
void scrollback(struct vc_data *vc, int lines);
void scrollfront(struct vc_data *vc, int lines);
-void clear_buffer_attributes(struct vc_data *vc);
void update_region(struct vc_data *vc, unsigned long start, int count);
void redraw_screen(struct vc_data *vc, int is_switch);
#define update_screen(x) redraw_screen(x, 0)
@@ -131,8 +130,6 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
int vt_waitactive(int n);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
-extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
- int deflt);
extern int unbind_con_driver(const struct consw *csw, int first, int last,
int deflt);
int vty_init(const struct file_operations *console_fops);
diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
deleted file mode 100644
index 4efba4d..0000000
--- a/include/linux/wait-simple.h
+++ /dev/null
@@ -1,199 +0,0 @@
-#ifndef _LINUX_WAIT_SIMPLE_H
-#define _LINUX_WAIT_SIMPLE_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#include <asm/current.h>
-
-struct swaiter {
- struct task_struct *task;
- struct list_head node;
-};
-
-#define DEFINE_SWAITER(name) \
- struct swaiter name = { \
- .task = current, \
- .node = LIST_HEAD_INIT((name).node), \
- }
-
-struct swait_head {
- raw_spinlock_t lock;
- struct list_head list;
-};
-
-#define SWAIT_HEAD_INITIALIZER(name) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .list = LIST_HEAD_INIT((name).list), \
- }
-
-#define DEFINE_SWAIT_HEAD(name) \
- struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
-
-extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
-
-#define init_swait_head(swh) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_swait_head((swh), &__key); \
- } while (0)
-
-/*
- * Waiter functions
- */
-extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
-extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_finish(struct swait_head *head, struct swaiter *w);
-
-/*
- * Wakeup functions
- */
-extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
-extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
-
-#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
-#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
-#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
-#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
-
-/*
- * Event API
- */
-#define __swait_event(wq, condition) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event(wq, condition) \
-do { \
- if (condition) \
- break; \
- __swait_event(wq, condition); \
-} while (0)
-
-#define __swait_event_interruptible(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-#define __swait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event_interruptible(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __swait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
-
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- int __ret = timeout; \
- if (!(condition)) \
- __swait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#define __swait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
- */
-#define swait_event_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __swait_event_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#endif
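[Editor's note] The deleted wait-simple.h above documented a reduced wait-queue API (swait_head/swaiter with the swait_event*/swait_wake* helpers). Purely for reference, since the revert drops it, a hedged sketch of how that removed API was meant to be used, with names taken from the deleted header and hypothetical example_* callers:

#include <linux/wait-simple.h>          /* removed by this revert */

static DEFINE_SWAIT_HEAD(example_wait);
static bool example_done;

static void example_producer(void)
{
        /* Set the condition first, then wake one TASK_NORMAL waiter,
         * as the deleted swait_event() docstring requires. */
        example_done = true;
        swait_wake(&example_wait);
}

static void example_consumer(void)
{
        /* Sleeps TASK_UNINTERRUPTIBLE until example_done is true. */
        swait_event(example_wait, example_done);
}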