author     Scott Wood <scottwood@freescale.com>  2015-02-13 22:12:06 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2015-02-13 22:19:22 (GMT)
commit     6faa2909871d8937cb2f79a10e1b21ffe193fac1 (patch)
tree       f558a94f1553814cc122ab8d9e04c0ebad5262a5 /include/linux
parent     fcb2fb84301c673ee15ca04e7a2fc965712d49a0 (diff)
Reset to 3.12.37
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/audit.h | 4
-rw-r--r--  include/linux/bio.h | 1
-rw-r--r--  include/linux/blkdev.h | 5
-rw-r--r--  include/linux/buffer_head.h | 44
-rw-r--r--  include/linux/capability.h | 7
-rw-r--r--  include/linux/clocksource.h | 2
-rw-r--r--  include/linux/compaction.h | 20
-rw-r--r--  include/linux/compiler-gcc.h | 3
-rw-r--r--  include/linux/compiler-gcc5.h | 66
-rw-r--r--  include/linux/compiler-intel.h | 7
-rw-r--r--  include/linux/compiler.h | 4
-rw-r--r--  include/linux/completion.h | 8
-rw-r--r--  include/linux/cpu.h | 4
-rw-r--r--  include/linux/cpuset.h | 56
-rw-r--r--  include/linux/crash_dump.h | 15
-rw-r--r--  include/linux/cred.h | 1
-rw-r--r--  include/linux/crypto.h | 13
-rw-r--r--  include/linux/dcache.h | 10
-rw-r--r--  include/linux/delay.h | 6
-rw-r--r--  include/linux/dma-mapping.h | 14
-rw-r--r--  include/linux/fs.h | 11
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/ftrace_event.h | 3
-rw-r--r--  include/linux/gfp.h | 4
-rw-r--r--  include/linux/hid-sensor-hub.h | 3
-rw-r--r--  include/linux/hid.h | 1
-rw-r--r--  include/linux/highmem.h | 28
-rw-r--r--  include/linux/hrtimer.h | 19
-rw-r--r--  include/linux/huge_mm.h | 4
-rw-r--r--  include/linux/hugetlb.h | 23
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/idr.h | 4
-rw-r--r--  include/linux/if_team.h | 1
-rw-r--r--  include/linux/if_vlan.h | 6
-rw-r--r--  include/linux/iio/events.h | 2
-rw-r--r--  include/linux/iio/trigger.h | 4
-rw-r--r--  include/linux/inetdevice.h | 2
-rw-r--r--  include/linux/init_task.h | 12
-rw-r--r--  include/linux/interrupt.h | 104
-rw-r--r--  include/linux/irq.h | 8
-rw-r--r--  include/linux/irq_work.h | 1
-rw-r--r--  include/linux/irqdesc.h | 5
-rw-r--r--  include/linux/irqflags.h | 29
-rw-r--r--  include/linux/jbd2.h | 30
-rw-r--r--  include/linux/jbd_common.h | 24
-rw-r--r--  include/linux/jiffies.h | 12
-rw-r--r--  include/linux/jump_label.h | 23
-rw-r--r--  include/linux/kdb.h | 3
-rw-r--r--  include/linux/kernel.h | 1
-rw-r--r--  include/linux/kernel_stat.h | 1
-rw-r--r--  include/linux/kgdb.h | 2
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/lglock.h | 19
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/linux/list.h | 19
-rw-r--r--  include/linux/list_bl.h | 28
-rw-r--r--  include/linux/locallock.h | 270
-rw-r--r--  include/linux/mempolicy.h | 6
-rw-r--r--  include/linux/migrate.h | 11
-rw-r--r--  include/linux/mm.h | 60
-rw-r--r--  include/linux/mm_types.h | 12
-rw-r--r--  include/linux/mmzone.h | 237
-rw-r--r--  include/linux/module.h | 2
-rw-r--r--  include/linux/mount.h | 9
-rw-r--r--  include/linux/mutex.h | 20
-rw-r--r--  include/linux/mutex_rt.h | 84
-rw-r--r--  include/linux/netdevice.h | 17
-rw-r--r--  include/linux/netfilter/x_tables.h | 7
-rw-r--r--  include/linux/netlink.h | 14
-rw-r--r--  include/linux/nfs_xdr.h | 11
-rw-r--r--  include/linux/notifier.h | 34
-rw-r--r--  include/linux/of.h | 84
-rw-r--r--  include/linux/oom.h | 3
-rw-r--r--  include/linux/page-flags.h | 18
-rw-r--r--  include/linux/page_cgroup.h | 15
-rw-r--r--  include/linux/pageblock-flags.h | 30
-rw-r--r--  include/linux/pagemap.h | 130
-rw-r--r--  include/linux/pagevec.h | 5
-rw-r--r--  include/linux/pci.h | 10
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/percpu-refcount.h | 6
-rw-r--r--  include/linux/percpu.h | 25
-rw-r--r--  include/linux/pid.h | 1
-rw-r--r--  include/linux/plist.h | 45
-rw-r--r--  include/linux/preempt.h | 70
-rw-r--r--  include/linux/preempt_mask.h | 15
-rw-r--r--  include/linux/printk.h | 9
-rw-r--r--  include/linux/pstore_ram.h | 4
-rw-r--r--  include/linux/ptrace.h | 35
-rw-r--r--  include/linux/quotaops.h | 8
-rw-r--r--  include/linux/radix-tree.h | 12
-rw-r--r--  include/linux/random.h | 2
-rw-r--r--  include/linux/rcupdate.h | 26
-rw-r--r--  include/linux/rcutree.h | 18
-rw-r--r--  include/linux/ring_buffer.h | 2
-rw-r--r--  include/linux/rtmutex.h | 42
-rw-r--r--  include/linux/rtnetlink.h | 5
-rw-r--r--  include/linux/rwlock_rt.h | 99
-rw-r--r--  include/linux/rwlock_types.h | 7
-rw-r--r--  include/linux/rwlock_types_rt.h | 33
-rw-r--r--  include/linux/rwsem.h | 6
-rw-r--r--  include/linux/rwsem_rt.h | 128
-rw-r--r--  include/linux/sched.h | 252
-rw-r--r--  include/linux/sched/rt.h | 5
-rw-r--r--  include/linux/seqlock.h | 54
-rw-r--r--  include/linux/serio.h | 1
-rw-r--r--  include/linux/shmem_fs.h | 1
-rw-r--r--  include/linux/signal.h | 1
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/linux/smp.h | 3
-rw-r--r--  include/linux/sock_diag.h | 2
-rw-r--r--  include/linux/spinlock.h | 12
-rw-r--r--  include/linux/spinlock_api_smp.h | 4
-rw-r--r--  include/linux/spinlock_rt.h | 166
-rw-r--r--  include/linux/spinlock_types.h | 79
-rw-r--r--  include/linux/spinlock_types_nort.h | 33
-rw-r--r--  include/linux/spinlock_types_raw.h | 56
-rw-r--r--  include/linux/spinlock_types_rt.h | 51
-rw-r--r--  include/linux/srcu.h | 9
-rw-r--r--  include/linux/string.h | 5
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/sunrpc/svcsock.h | 1
-rw-r--r--  include/linux/swap.h | 30
-rw-r--r--  include/linux/swapfile.h | 2
-rw-r--r--  include/linux/sysctl.h | 1
-rw-r--r--  include/linux/time.h | 13
-rw-r--r--  include/linux/timer.h | 2
-rw-r--r--  include/linux/uaccess.h | 41
-rw-r--r--  include/linux/uprobes.h | 1
-rw-r--r--  include/linux/usb/quirks.h | 20
-rw-r--r--  include/linux/user_namespace.h | 12
-rw-r--r--  include/linux/vga_switcheroo.h | 2
-rw-r--r--  include/linux/vm_event_item.h | 4
-rw-r--r--  include/linux/vmacache.h | 38
-rw-r--r--  include/linux/vmstat.h | 12
-rw-r--r--  include/linux/wait-simple.h | 207
-rw-r--r--  include/linux/wait.h | 1
-rw-r--r--  include/linux/workqueue.h | 2
138 files changed, 1243 insertions, 2316 deletions
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 4fb28b2..c25cb64d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -46,6 +46,7 @@ struct audit_tree;
struct audit_krule {
int vers_ops;
+ u32 pflags;
u32 flags;
u32 listnr;
u32 action;
@@ -63,6 +64,9 @@ struct audit_krule {
u64 prio;
};
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY 0x1
+
struct audit_field {
u32 type;
u32 val;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ec48bac..6c17ad5 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -187,6 +187,7 @@ struct bio_integrity_payload {
unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_idx; /* current bip_vec index */
+ unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned bip_owns_buf:1; /* should free bip_buf */
struct work_struct bip_work; /* I/O completion */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e6f765..b105678 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1198,10 +1198,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = (sector << 9) & (granularity - 1);
+ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
- return (granularity + lim->alignment_offset - alignment)
- & (granularity - 1);
+ return (granularity + lim->alignment_offset - alignment) % granularity;
}
static inline int bdev_alignment_offset(struct block_device *bdev)
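The old bitmask form only yields a true remainder when the granularity is a power of two; sector_div() computes it for any granularity. A minimal user-space sketch of the difference, with an assumed non-power-of-two io_min of 3072 bytes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sector = 7;                /* offset in 512-byte sectors */
        unsigned int granularity = 3072;    /* assumed non-power-of-two io_min */

        /* old form: correct only for power-of-two granularity */
        unsigned int old_off = (sector << 9) & (granularity - 1);
        /* new form: a true byte remainder, as sector_div() << 9 computes */
        unsigned int new_off = (sector << 9) % granularity;

        printf("old=%u new=%u\n", old_off, new_off);  /* old=2560, new=512 */
        return 0;
}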
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index cac4973..d77797a 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -75,52 +75,8 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
-#ifdef CONFIG_PREEMPT_RT_BASE
- spinlock_t b_uptodate_lock;
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
- spinlock_t b_state_lock;
- spinlock_t b_journal_head_lock;
-#endif
-#endif
};
-static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-{
- unsigned long flags;
-
-#ifndef CONFIG_PREEMPT_RT_BASE
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-#else
- spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-#endif
- return flags;
-}
-
-static inline void
-bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-{
-#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
- local_irq_restore(flags);
-#else
- spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-#endif
-}
-
-static inline void buffer_head_init_locks(struct buffer_head *bh)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
- spin_lock_init(&bh->b_uptodate_lock);
-#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
- defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
- spin_lock_init(&bh->b_state_lock);
- spin_lock_init(&bh->b_journal_head_lock);
-#endif
-#endif
-}
-
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
diff --git a/include/linux/capability.h b/include/linux/capability.h
index a6ee1f9..aa93e5e 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
# error Fix up hand-coded capability macro initializers
#else /* HAND-CODED capability initializers */
+#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
+#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
+
# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
+# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
| CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
CAP_FS_MASK_B1 } })
@@ -210,7 +213,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
-extern bool inode_capable(const struct inode *inode, int cap);
+extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
/* audit system wants to get cap info from files as well */
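CAP_LAST_U32_VALID_MASK exists so that CAP_FULL_SET stops setting bits for capabilities that do not exist. A hedged arithmetic sketch, assuming CAP_LAST_CAP == 36 (v3.12's CAP_BLOCK_SUSPEND):

/* CAP_TO_MASK(n) is 1U << (n & 31), so for the last u32:
 *
 *   CAP_TO_MASK(36 + 1) - 1  ==  (1U << 5) - 1  ==  0x1f
 *
 * Only the bits for capabilities 32..36 are set, instead of ~0,
 * so a full capability set no longer contains bits that fail
 * cap_valid()-style range checks. */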
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index dbbf8aa..4802826 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -289,7 +289,7 @@ extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
-extern struct clocksource * __init __weak clocksource_default_clock(void);
+extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
extern void
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 091d72e..01e3132 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
- bool sync, bool *contended);
+ enum migrate_mode mode, bool *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -62,6 +62,22 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return zone->compact_considered < defer_limit;
}
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+static inline void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success)
+{
+ if (alloc_success) {
+ zone->compact_considered = 0;
+ zone->compact_defer_shift = 0;
+ }
+ if (order >= zone->compact_order_failed)
+ zone->compact_order_failed = order + 1;
+}
+
/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
@@ -75,7 +91,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync, bool *contended)
+ enum migrate_mode mode, bool *contended)
{
return COMPACT_CONTINUE;
}
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 24545cd..02ae99e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -37,6 +37,9 @@
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+
#ifdef __CHECKER__
#define __must_be_array(arr) 0
#else
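The classic use of OPTIMIZER_HIDE_VAR() is constant-time comparison, where the compiler must not notice that the result can no longer change and exit the loop early. A user-space sketch of that pattern (the idea behind crypto_memneq, not the kernel's implementation):

#include <stddef.h>

#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

/* Returns 0 iff the buffers are equal, in time independent of where
 * they differ: the empty asm keeps gcc from folding or short-circuiting. */
static unsigned long memneq_sketch(const unsigned char *a,
                                   const unsigned char *b, size_t n)
{
        unsigned long neq = 0;

        while (n--) {
                neq |= *a++ ^ *b++;
                OPTIMIZER_HIDE_VAR(neq);
        }
        return neq;
}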
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 0000000..cdd1cc2
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,66 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index dc1bd3d..5529c52 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -15,6 +15,7 @@
*/
#undef barrier
#undef RELOC_HIDE
+#undef OPTIMIZER_HIDE_VAR
#define barrier() __memory_barrier()
@@ -23,6 +24,12 @@
__ptr = (unsigned long) (ptr); \
(typeof(ptr)) (__ptr + (off)); })
+/* This should act as an optimization barrier on var.
+ * Given that this compiler does not have inline assembly, a compiler barrier
+ * is the best we can do.
+ */
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+
/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
#define __must_be_array(a) 0
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 92669cd..a2329c5 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -170,6 +170,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
(typeof(ptr)) (__ptr + (off)); })
#endif
+#ifndef OPTIMIZER_HIDE_VAR
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index eb2d4ac..3cd574d 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -8,7 +8,7 @@
* See kernel/sched/core.c for details.
*/
-#include <linux/wait-simple.h>
+#include <linux/wait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -24,11 +24,11 @@
*/
struct completion {
unsigned int done;
- struct swait_head wait;
+ wait_queue_head_t wait;
};
#define COMPLETION_INITIALIZER(work) \
- { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
@@ -73,7 +73,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_swait_head(&x->wait);
+ init_waitqueue_head(&x->wait);
}
extern void wait_for_completion(struct completion *);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0c2b05c..801ff9e 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -179,8 +179,6 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
-extern void pin_current_cpu(void);
-extern void unpin_current_cpu(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -208,8 +206,6 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
-static inline void pin_current_cpu(void) { }
-static inline void unpin_current_cpu(void) { }
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index cc1b01c..a7ebb89 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,10 +12,31 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
-extern int number_of_cpusets; /* How many cpusets are defined in system? */
+extern struct static_key cpusets_enabled_key;
+static inline bool cpusets_enabled(void)
+{
+ return static_key_false(&cpusets_enabled_key);
+}
+
+static inline int nr_cpusets(void)
+{
+ /* jump label reference count + the top-level cpuset */
+ return static_key_count(&cpusets_enabled_key) + 1;
+}
+
+static inline void cpuset_inc(void)
+{
+ static_key_slow_inc(&cpusets_enabled_key);
+}
+
+static inline void cpuset_dec(void)
+{
+ static_key_slow_dec(&cpusets_enabled_key);
+}
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
@@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
- return number_of_cpusets <= 1 ||
+ return nr_cpusets() <= 1 ||
__cpuset_node_allowed_softwall(node, gfp_mask);
}
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
- return number_of_cpusets <= 1 ||
+ return nr_cpusets() <= 1 ||
__cpuset_node_allowed_hardwall(node, gfp_mask);
}
@@ -87,25 +108,26 @@ extern void rebuild_sched_domains(void);
extern void cpuset_print_task_mems_allowed(struct task_struct *p);
/*
- * get_mems_allowed is required when making decisions involving mems_allowed
- * such as during page allocation. mems_allowed can be updated in parallel
- * and depending on the new value an operation can fail potentially causing
- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
- * prevents these artificial failures.
+ * read_mems_allowed_begin is required when making decisions involving
+ * mems_allowed such as during page allocation. mems_allowed can be updated in
+ * parallel and depending on the new value an operation can fail potentially
+ * causing process failure. A retry loop with read_mems_allowed_begin and
+ * read_mems_allowed_retry prevents these artificial failures.
*/
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
{
return read_seqcount_begin(&current->mems_allowed_seq);
}
/*
- * If this returns false, the operation that took place after get_mems_allowed
- * may have failed. It is up to the caller to retry the operation if
+ * If this returns true, the operation that took place after
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
+ * update of mems_allowed. It is up to the caller to retry the operation if
* appropriate.
*/
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
{
- return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+ return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
static inline void set_mems_allowed(nodemask_t nodemask)
@@ -119,6 +141,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
#else /* !CONFIG_CPUSETS */
+static inline bool cpusets_enabled(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
@@ -221,14 +245,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
{
}
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
{
return 0;
}
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
{
- return true;
+ return false;
}
#endif /* !CONFIG_CPUSETS */
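The counter-to-jump-label conversion means the common no-cpusets case costs a patched-out branch instead of a global memory read. A sketch of the same static-key pattern, using a hypothetical feature key:

#include <linux/jump_label.h>
#include <linux/types.h>

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static inline bool my_feature_enabled(void)
{
        /* compiles to a no-op until the key is switched on */
        return static_key_false(&my_feature_key);
}

static void my_feature_register(void)          /* first user appears */
{
        static_key_slow_inc(&my_feature_key);
}

static void my_feature_unregister(void)        /* last user goes away */
{
        static_key_slow_dec(&my_feature_key);
}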
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 7032518..60023e5 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -14,14 +14,13 @@
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
- unsigned long long *size);
-extern void __weak elfcorehdr_free(unsigned long long addr);
-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
- unsigned long from, unsigned long pfn,
- unsigned long size, pgprot_t prot);
+extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
+extern void elfcorehdr_free(unsigned long long addr);
+extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 04421e8..6c58dd7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern int set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
+extern bool may_setgroups(void);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b92eadf..2b00d92 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
#include <linux/uaccess.h>
/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so retains those aliases as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
+/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
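In a crypto module, one MODULE_ALIAS_CRYPTO() line then covers both spellings; a sketch for a hypothetical SHA-1 module:

MODULE_ALIAS_CRYPTO("sha1");

/* emits two .modinfo entries:
 *   alias=sha1          -- userspace module requests keep working
 *   alias=crypto-sha1   -- the kernel autoloads only this prefixed
 *                          form, so an algorithm name can no longer
 *                          request_module() an arbitrary module    */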
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 59066e0..53c1b60 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -122,15 +122,15 @@ struct dentry {
void *d_fsdata; /* fs-specific data */
struct list_head d_lru; /* LRU list */
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
/*
- * d_child and d_rcu can share memory
+ * d_alias and d_rcu can share memory
*/
union {
- struct list_head d_child; /* child of parent list */
+ struct hlist_node d_alias; /* inode alias list */
struct rcu_head d_rcu;
} d_u;
- struct list_head d_subdirs; /* our children */
- struct hlist_node d_alias; /* inode alias list */
};
/*
@@ -211,6 +211,8 @@ struct dentry_operations {
#define DCACHE_LRU_LIST 0x80000
#define DCACHE_DENTRY_KILLED 0x100000
+#define DCACHE_MAY_FREE 0x00800000
+
extern seqlock_t rename_lock;
static inline int dname_external(const struct dentry *dentry)
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 37caab3..a6ecb34 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -52,10 +52,4 @@ static inline void ssleep(unsigned int seconds)
msleep(seconds * 1000);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void cpu_chill(void);
-#else
-# define cpu_chill() cpu_relax()
-#endif
-
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2..ec951f9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,20 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
}
#endif
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int rc = dma_set_mask(dev, mask);
+ if (rc == 0)
+ dma_set_coherent_mask(dev, mask);
+ return rc;
+}
+
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)
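A typical caller tries the widest mask first and falls back; a sketch with a hypothetical PCI probe function:

/* hypothetical driver probe; only the DMA-mask setup is shown */
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err;

        /* one call now covers the streaming and coherent masks */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                return err;     /* no usable DMA addressing */

        /* ... rest of probe ... */
        return 0;
}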
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 164d2a9..9cb726a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2217,7 +2217,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+ if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+ return 0;
+ return vfs_fsync_range(file, pos, pos + count - 1,
+ (file->f_flags & __O_SYNC) ? 0 : 1);
+}
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
@@ -2490,6 +2496,9 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
void inode_dio_wait(struct inode *inode);
void inode_dio_done(struct inode *inode);
+extern void inode_set_flags(struct inode *inode, unsigned int flags,
+ unsigned int mask);
+
extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9f15c00..e68db4d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -524,6 +524,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
@@ -533,6 +534,7 @@ static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index ab58c33..20aebdb 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -56,9 +56,6 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
- unsigned short migrate_disable;
- unsigned short padding;
- unsigned char preempt_lazy_count;
};
#define FTRACE_MAX_EVENT \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 9b4dd49..fa7ac98 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -364,8 +364,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, int cold);
-extern void free_hot_cold_page_list(struct list_head *list, int cold);
+extern void free_hot_cold_page(struct page *page, bool cold);
+extern void free_hot_cold_page_list(struct list_head *list, bool cold);
extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 32ba451..c32411b 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -21,6 +21,8 @@
#include <linux/hid.h>
#include <linux/hid-sensor-ids.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
/**
* struct hid_sensor_hub_attribute_info - Attribute info
@@ -166,6 +168,7 @@ struct hid_sensor_common {
struct platform_device *pdev;
unsigned usage_id;
bool data_ready;
+ struct iio_trigger *trigger;
struct hid_sensor_hub_attribute_info poll;
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 31b9d29..00c88fc 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -286,6 +286,7 @@ struct hid_item {
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
+#define HID_QUIRK_ALWAYS_POLL 0x00000400
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 821d523..7fb31da 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,7 +7,6 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -86,51 +85,32 @@ static inline void __kunmap_atomic(void *addr)
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-#ifndef CONFIG_PREEMPT_RT_FULL
DECLARE_PER_CPU(int, __kmap_atomic_idx);
-#endif
static inline int kmap_atomic_idx_push(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
WARN_ON_ONCE(in_irq() && !irqs_disabled());
BUG_ON(idx > KM_TYPE_NR);
-# endif
- return idx;
-#else
- current->kmap_idx++;
- BUG_ON(current->kmap_idx > KM_TYPE_NR);
- return current->kmap_idx - 1;
#endif
+ return idx;
}
static inline int kmap_atomic_idx(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
return __this_cpu_read(__kmap_atomic_idx) - 1;
-#else
- return current->kmap_idx - 1;
-#endif
}
static inline void kmap_atomic_idx_pop(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
-# ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef CONFIG_DEBUG_HIGHMEM
int idx = __this_cpu_dec_return(__kmap_atomic_idx);
BUG_ON(idx < 0);
-# else
- __this_cpu_dec(__kmap_atomic_idx);
-# endif
#else
- current->kmap_idx--;
-# ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(current->kmap_idx < 0);
-# endif
+ __this_cpu_dec(__kmap_atomic_idx);
#endif
}
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index bdbf77db..d19a5c2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,11 +111,6 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
- struct list_head cb_entry;
- int irqsafe;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- ktime_t praecox;
-#endif
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -152,7 +147,6 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
- struct list_head expired;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
@@ -196,9 +190,6 @@ struct hrtimer_cpu_base {
unsigned long nr_hangs;
ktime_t max_hang_time;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- wait_queue_head_t wait;
-#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
@@ -394,13 +385,6 @@ static inline int hrtimer_restart(struct hrtimer *timer)
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
-/* Softirq preemption could deadlock timer removal */
-#ifdef CONFIG_PREEMPT_RT_BASE
- extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
-#else
-# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
-#endif
-
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
@@ -461,8 +445,9 @@ extern int schedule_hrtimeout_range_clock(ktime_t *expires,
unsigned long delta, const enum hrtimer_mode mode, int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
-/* Called from the periodic timer tick */
+/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
+extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a291552..aac671b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -92,10 +92,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
#endif /* CONFIG_DEBUG_VM */
extern unsigned long transparent_hugepage_flags;
-extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd,
- struct vm_area_struct *vma,
- unsigned long addr, unsigned long end);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6125579..511b1a0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -387,15 +387,23 @@ static inline pgoff_t basepage_index(struct page *page)
extern void dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
static inline int hugepage_migration_support(struct hstate *h)
{
- return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+ return huge_page_shift(h) == PMD_SHIFT;
+#else
+ return 0;
+#endif
+}
+
+static inline bool hugepages_supported(void)
+{
+ /*
+ * Some platform decide whether they support huge pages at boot
+ * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+ * there is no such support
+ */
+ return HPAGE_SHIFT != 0;
}
#else /* CONFIG_HUGETLB_PAGE */
@@ -425,7 +433,6 @@ static inline pgoff_t basepage_index(struct page *page)
return page->index;
}
#define dissolve_free_huge_pages(s, e) do {} while (0)
-#define pmd_huge_support() 0
#define hugepage_migration_support(h) 0
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d98503b..b6043a0 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -473,15 +473,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
* 0 . 13 (Windows Server 2008)
* 1 . 1 (Windows 7)
* 2 . 4 (Windows 8)
+ * 3 . 0 (Windows 8 R2)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
+#define VERSION_WIN8_1 ((3 << 16) | (0))
+
#define VERSION_INVAL -1
-#define VERSION_CURRENT VERSION_WIN8
+#define VERSION_CURRENT VERSION_WIN8_1
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -884,7 +887,7 @@ struct vmbus_channel_relid_released {
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
- u32 padding2;
+ u32 target_vcpu; /* The VCPU the host should respond to */
u64 interrupt_page;
u64 monitor_page1;
u64 monitor_page2;
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 267527b..871a213 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -92,14 +92,10 @@ void idr_init(struct idr *idp);
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
-void idr_preload_end(void);
-#else
static inline void idr_preload_end(void)
{
preempt_enable();
}
-#endif
/**
* idr_find - return pointer for given id
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index a899dc2..a6aa970 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -194,6 +194,7 @@ struct team {
bool user_carrier_enabled;
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
+ bool port_mtu_change_allowed;
struct {
unsigned int count;
unsigned int interval; /* in ms */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 715c343..0bd3943 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -90,7 +90,6 @@ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern bool vlan_do_receive(struct sk_buff **skb);
-extern struct sk_buff *vlan_untag(struct sk_buff *skb);
extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
@@ -126,11 +125,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb)
return false;
}
-static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
-{
- return skb;
-}
-
static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
return 0;
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 13ce220..593ae7c 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -90,7 +90,7 @@ enum iio_event_direction {
#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
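The old 0xCF mask had a hole in it, so part of the 7-bit direction field was silently dropped:

/*   0xCF = 1100 1111b  -- clears bits 4 and 5 of the field
 *   0x7F = 0111 1111b  -- keeps exactly the low seven bits
 *
 * e.g. a direction value of 0x30 extracted through 0xCF reads
 * back as 0x00, so matching on that direction never fired. */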
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 369cf2c..68f46cd 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -84,10 +84,12 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
put_device(&trig->dev);
}
-static inline void iio_trigger_get(struct iio_trigger *trig)
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
__module_get(trig->ops->owner);
+
+ return trig;
}
/**
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 79640e0..f738f92 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -234,7 +234,7 @@ static inline void in_dev_put(struct in_device *idev)
static __inline__ __be32 inet_make_mask(int logmask)
{
if (logmask)
- return htonl(~((1<<(32-logmask))-1));
+ return htonl(~((1U<<(32-logmask))-1));
return 0;
}
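With a signed literal, logmask == 1 computed 1 << 31, shifting into the sign bit of an int, which is undefined behaviour in C; the unsigned literal makes every case well defined:

/*   logmask = 1:   ~((1U << 31) - 1) == 0x80000000 -> 128.0.0.0
 *   logmask = 24:  ~((1U << 8)  - 1) == 0xffffff00 -> 255.255.255.0 */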
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 766558a..998f4df 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -40,6 +40,7 @@ extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
+ .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
@@ -143,16 +144,9 @@ extern struct task_group root_task_group;
# define INIT_PERF_EVENTS(tsk)
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define INIT_TIMER_LIST .posix_timer_list = NULL,
-#else
-# define INIT_TIMER_LIST
-#endif
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
- .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
- .vtime_seq = SEQCNT_ZERO, \
+ .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
.vtime_snap = 0, \
.vtime_snap_whence = VTIME_SYS,
#else
@@ -214,13 +208,13 @@ extern struct task_group root_task_group;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
- INIT_TIMER_LIST \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a2609fb..623ab2d 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -58,7 +58,6 @@
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
* resume time.
- * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SHARED 0x00000080
@@ -72,7 +71,6 @@
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
-#define IRQF_NO_SOFTIRQ_CALL 0x00040000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -178,7 +176,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
-# define local_irq_enable_in_hardirq() local_irq_enable_nort()
+# define local_irq_enable_in_hardirq() local_irq_enable()
#endif
extern void disable_irq_nosync(unsigned int irq);
@@ -200,7 +198,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
extern cpumask_var_t irq_default_affinity;
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+ bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @mask: cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @mask: cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, true);
+}
+
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
@@ -222,7 +253,6 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
- struct list_head list;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
@@ -237,6 +267,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
return -EINVAL;
}
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return 0;
+}
+
static inline int irq_can_set_affinity(unsigned int irq)
{
return 0;
@@ -317,13 +352,9 @@ static inline int disable_irq_wake(unsigned int irq)
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
-# else
-# define force_irqthreads (true)
-# endif
#else
-#define force_irqthreads (false)
+#define force_irqthreads (0)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
@@ -379,14 +410,8 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
-#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-static inline void thread_do_softirq(void) { do_softirq(); }
-#else
-extern void thread_do_softirq(void);
-#endif
-
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -394,8 +419,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
-extern void softirq_check_pending_idle(void);
-
/* This is the worklist that queues up per-cpu softirq work.
*
* send_remote_sendirq() adds work to these lists, and
@@ -436,9 +459,8 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU, it is rescheduled
- for later.
- * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * If this tasklet is already running on another CPU (or schedule is called
+ from tasklet itself), it is rescheduled for later.
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
@@ -463,36 +485,27 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
- TASKLET_STATE_PENDING /* Tasklet is pending */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline int tasklet_tryunlock(struct tasklet_struct *t)
-{
- return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
-}
-
static inline void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_clear_bit();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-extern void tasklet_unlock_wait(struct tasklet_struct *t);
-
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
#else
#define tasklet_trylock(t) 1
-#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -541,8 +554,17 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
-extern void tasklet_enable(struct tasklet_struct *t);
-extern void tasklet_hi_enable(struct tasklet_struct *t);
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->count);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic_dec();
+ atomic_dec(&t->count);
+}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
@@ -574,12 +596,6 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
tasklet_kill(&ttimer->tasklet);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void softirq_early_init(void);
-#else
-static inline void softirq_early_init(void) { }
-#endif
-
/*
* Autoprobing for irqs:
*
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e2d8789..896824e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,7 +70,6 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
- * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -95,14 +94,12 @@ enum {
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_NO_SOFTIRQ_CALL = (1 << 18),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_NO_SOFTIRQ_CALL)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -383,7 +380,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
-extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
+extern int irq_set_affinity_locked(struct irq_data *data,
+ const struct cpumask *cpumask, bool force);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 60c19ee..6601702 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -16,7 +16,6 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
-#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
struct irq_work {
unsigned long flags;
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index da992bc..a7b4b61 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -27,6 +27,8 @@ struct irq_desc;
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
+ * @threads_handled: stats field for deferred spurious detection of threaded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
@@ -52,7 +54,8 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- u64 random_ip;
+ atomic_t threads_handled;
+ int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 0977829..d176d65 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -25,6 +25,8 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
@@ -37,15 +39,9 @@
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-#endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
-#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -151,23 +147,4 @@
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
-/*
- * local_irq* variants depending on RT/!RT
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define local_irq_disable_nort() do { } while (0)
-# define local_irq_enable_nort() do { } while (0)
-# define local_irq_save_nort(flags) local_save_flags(flags)
-# define local_irq_restore_nort(flags) (void)(flags)
-# define local_irq_disable_rt() local_irq_disable()
-# define local_irq_enable_rt() local_irq_enable()
-#else
-# define local_irq_disable_nort() local_irq_disable()
-# define local_irq_enable_nort() local_irq_enable()
-# define local_irq_save_nort(flags) local_irq_save(flags)
-# define local_irq_restore_nort(flags) local_irq_restore(flags)
-# define local_irq_disable_rt() do { } while (0)
-# define local_irq_enable_rt() do { } while (0)
-#endif
-
#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d5b50a1..0dae71e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -159,7 +159,11 @@ typedef struct journal_header_s
* journal_block_tag (in the descriptor). The other h_chksum* fields are
* not used.
*
- * Checksum v1 and v2 are mutually exclusive features.
+ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
+ * journal_block_tag3_t to store a full 32-bit checksum. Everything else
+ * is the same as v2.
+ *
+ * Checksum v1, v2, and v3 are mutually exclusive features.
*/
struct commit_header {
__be32 h_magic;
@@ -179,6 +183,14 @@ struct commit_header {
* raw struct shouldn't be used for pointer math or sizeof() - use
* journal_tag_bytes(journal) instead to compute this.
*/
+typedef struct journal_block_tag3_s
+{
+ __be32 t_blocknr; /* The on-disk block number */
+ __be32 t_flags; /* See below */
+ __be32 t_blocknr_high; /* most-significant high 32bits. */
+ __be32 t_checksum; /* crc32c(uuid+seq+block) */
+} journal_block_tag3_t;
+
typedef struct journal_block_tag_s
{
__be32 t_blocknr; /* The on-disk block number */
@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
__be32 t_blocknr_high; /* most-significant high 32bits. */
} journal_block_tag_t;
-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
-
/* Tail of descriptor block, for checksumming */
struct jbd2_journal_block_tail {
__be32 t_checksum; /* crc32c(uuid+descr_block) */
@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
+#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
/* Features known to this kernel version: */
#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
JBD2_FEATURE_INCOMPAT_64BIT | \
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
- JBD2_FEATURE_INCOMPAT_CSUM_V2)
+ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
+ JBD2_FEATURE_INCOMPAT_CSUM_V3)
#ifdef __KERNEL__
@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
extern int jbd2_journal_blocks_per_page(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
+static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
+{
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
+ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+ return 1;
+
+ return 0;
+}
+
/*
* We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
* transaction control blocks.
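
A sketch of the tag-size selection the v3 format implies, using only the feature macros and structures above; the authoritative helper is journal_tag_bytes() in fs/jbd2/journal.c:

/*
 * Sketch of the size logic implied by CSUM_V3: a v3 journal always uses
 * the fixed-size journal_block_tag3_t, while v2 appends a 16-bit
 * checksum to the legacy tag and non-64bit journals drop t_blocknr_high.
 */
static inline size_t sketch_tag_bytes(journal_t *journal)
{
	size_t sz;

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return sizeof(journal_block_tag3_t);

	sz = sizeof(journal_block_tag_t);
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		sz += sizeof(__u16);

	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
		return sz;

	return sz - sizeof(__u32);
}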
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index a90a6f5..3dc5343 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -15,56 +15,32 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
-#else
- spin_lock(&bh->b_state_lock);
-#endif
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
-#else
- return spin_trylock(&bh->b_state_lock);
-#endif
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
-#else
- return spin_is_locked(&bh->b_state_lock);
-#endif
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
-#else
- spin_unlock(&bh->b_state_lock);
-#endif
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
-#else
- spin_lock(&bh->b_journal_head_lock);
-#endif
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
-#else
- spin_unlock(&bh->b_journal_head_lock);
-#endif
}
#endif
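
With the RT spinlock variants reverted, these helpers are bit spinlocks again, so the critical section must stay atomic; a typical (hypothetical) caller:

/* Hypothetical caller: BH_State serializes journal_head state changes,
 * and as a bit spinlock the section below must not sleep.
 */
static void example_move_jh(struct buffer_head *bh)
{
	jbd_lock_bh_state(bh);
	/* ... inspect/modify bh2jh(bh)->b_transaction, b_jlist, ... */
	jbd_unlock_bh_state(bh);
}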
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d235e88..8acbb7b 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj;
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
-#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
-#define USEC_CONVERSION \
- ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
- TICK_NSEC -1) / (u64)TICK_NSEC))
-/*
- * USEC_ROUND is used in the timeval to jiffie conversion. See there
- * for more details. It is the scaled resolution rounding value. Note
- * that it is a 64-bit value. Since, when it is applied, we are already
- * in jiffies (albit scaled), it is nothing but the bits we will shift
- * off.
- */
-#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
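
A worked example of the surviving multiply-and-shift path; the concrete constants assume HZ = 1000 and depend on SHIFT_HZ:

/*
 * Worked example of the remaining scaled conversion, assuming HZ = 1000
 * (TICK_NSEC = 1000000, SHIFT_HZ = 10, so SEC_JIFFIE_SC = 22 and
 * NSEC_JIFFIE_SC = 51):
 *
 *	u64 nsec = 2000000;                                   // 2 ms
 *	u64 j = (nsec * NSEC_CONVERSION) >> NSEC_JIFFIE_SC;   // == 2
 *
 * NSEC_CONVERSION is ceil(2^51 / TICK_NSEC), so no division is needed
 * at run time, and the rounded-up constant guarantees that an exact
 * multiple of a tick never truncates to one jiffy less.
 */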
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 006627b..9216e46 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -49,8 +49,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
- !defined(CONFIG_PREEMPT_BASE)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
struct static_key {
atomic_t enabled;
@@ -63,6 +62,10 @@ struct static_key {
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
+#else
+struct static_key {
+ atomic_t enabled;
+};
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
enum jump_label_type {
@@ -73,6 +76,12 @@ enum jump_label_type {
struct module;
#include <linux/atomic.h>
+
+static inline int static_key_count(struct static_key *key)
+{
+ return atomic_read(&key->enabled);
+}
+
#ifdef HAVE_JUMP_LABEL
#define JUMP_LABEL_TRUE_BRANCH 1UL
@@ -123,24 +132,20 @@ extern void jump_label_apply_nops(struct module *mod);
#else /* !HAVE_JUMP_LABEL */
-struct static_key {
- atomic_t enabled;
-};
-
static __always_inline void jump_label_init(void)
{
}
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(atomic_read(&key->enabled)) > 0)
+ if (unlikely(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(atomic_read(&key->enabled)) > 0)
+ if (likely(static_key_count(key) > 0))
return true;
return false;
}
@@ -180,7 +185,7 @@ static inline int jump_label_apply_nops(struct module *mod)
static inline bool static_key_enabled(struct static_key *key)
{
- return (atomic_read(&key->enabled) > 0);
+ return static_key_count(key) > 0;
}
#endif /* _LINUX_JUMP_LABEL_H */
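
Typical use of the API (hypothetical key name), with static_key_count() now backing both the asm-goto and fallback variants:

/* Hypothetical default-off fast path guarded by a static key. */
static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	if (static_key_false(&example_key)) {
		/* rarely enabled slow path */
	}
}

/* elsewhere: static_key_slow_inc(&example_key) enables the branch,
 * static_key_slow_dec(&example_key) disables it again.
 */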
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 680ad23..7f6fe6e 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -115,7 +115,7 @@ extern int kdb_trap_printk;
extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-#define in_kdb_printk() (kdb_trap_printk)
+
extern void kdb_init(int level);
/* Access to kdb specific polling devices */
@@ -150,7 +150,6 @@ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
-#define in_kdb_printk() (0)
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c34e608..672ddc4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -412,7 +412,6 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
- SYSTEM_SUSPEND,
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 51c72be..4b2053a 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -74,6 +74,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
* Number of interrupts per specific IRQ source, since bootup
*/
extern unsigned int kstat_irqs(unsigned int irq);
+extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
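
The _usr variant is presumably for contexts such as /proc readers that do not hold the sparse-irq protection themselves; a minimal sketch:

/* Sketch: unlike kstat_irqs(), the _usr variant is expected to take
 * the sparse-irq protection internally, so a procfs-style reader can
 * call it without racing against irq descriptor teardown.
 */
static unsigned int sketch_show_irq(unsigned int irq)
{
	return kstat_irqs_usr(irq);
}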
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index c6e091b..bdfc95b 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -283,7 +283,7 @@ struct kgdb_io {
extern struct kgdb_arch arch_kgdb_ops;
-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
#ifdef CONFIG_SERIAL_KGDB_NMI
extern int kgdb_register_nmi_console(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0fbbc7a..e47c7e2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -464,8 +464,6 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
- u64 last_generation);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index d2c0d6d..0d24e93 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -42,37 +42,22 @@
#endif
struct lglock {
-#ifndef CONFIG_PREEMPT_RT_FULL
arch_spinlock_t __percpu *lock;
-#else
- struct rt_mutex __percpu *lock;
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lock_key;
struct lockdep_map lock_dep_map;
#endif
};
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define DEFINE_LGLOCK(name) \
+#define DEFINE_LGLOCK(name) \
static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
= __ARCH_SPIN_LOCK_UNLOCKED; \
struct lglock name = { .lock = &name ## _lock }
-# define DEFINE_STATIC_LGLOCK(name) \
+#define DEFINE_STATIC_LGLOCK(name) \
static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
= __ARCH_SPIN_LOCK_UNLOCKED; \
static struct lglock name = { .lock = &name ## _lock }
-#else
-
-# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
- struct lglock name = { .lock = &name ## _lock }
-
-# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
- static struct lglock name = { .lock = &name ## _lock }
-#endif
void lg_lock_init(struct lglock *lg, char *name);
void lg_local_lock(struct lglock *lg);
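
With the rt_mutex variant gone, lglocks are per-cpu arch spinlocks again; hypothetical usage:

/* Hypothetical usage: cheap per-cpu locking for local updates, with
 * lg_global_lock() available for rare cross-CPU traversals.
 */
DEFINE_STATIC_LGLOCK(example_lglock);

static void update_local(void)
{
	lg_local_lock(&example_lglock);
	/* ... modify this CPU's slice of the data ... */
	lg_local_unlock(&example_lglock);
}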
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bec6dbe..e13b3ae 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
struct device *dev;
void __iomem * const *iomap;
unsigned int n_ports;
+ unsigned int n_tags; /* nr of NCQ tags */
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
@@ -822,6 +823,7 @@ struct ata_port {
unsigned long qc_allocated;
unsigned int qc_active;
int nr_active_links; /* #links with active qcs */
+ unsigned int last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
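
last_tag supports round-robin NCQ tag allocation for controllers that expect tags in increasing order; a sketch of the allocator loop (cf. ata_qc_new() in drivers/ata/libata-core.c):

/* Sketch of round-robin tag selection with ap->last_tag: start after
 * the last tag handed out so ordered controllers see an increasing
 * sequence.
 */
static int sketch_pick_tag(struct ata_port *ap, unsigned int max_queue)
{
	unsigned int i, tag;

	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
		if (tag >= max_queue)
			tag = 0;
		if (!test_and_set_bit(tag, &ap->qc_allocated)) {
			ap->last_tag = tag;
			return tag;
		}
	}
	return -1;	/* no free tag */
}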
diff --git a/include/linux/list.h b/include/linux/list.h
index 885943e..2ece638 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -373,15 +373,20 @@ static inline void list_splice_tail_init(struct list_head *list,
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
- * list_last_entry - get the last element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
+ * list_next_entry - get the next element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * list_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
*/
-#define list_last_entry(ptr, type, member) \
- list_entry((ptr)->prev, type, member)
+#define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
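
Hypothetical use of the new accessors, given a cursor known not to sit at either end of the list:

struct item {
	int val;
	struct list_head node;
};

/* caller guarantees 'it' is neither the first nor the last entry */
static void peek_neighbours(struct item *it)
{
	struct item *nxt = list_next_entry(it, node);
	struct item *prv = list_prev_entry(it, node);

	pr_debug("%d <- %d -> %d\n", prv->val, it->val, nxt->val);
}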
/**
* list_for_each - iterate over a list
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index d8876a0..2eb8855 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,7 +2,6 @@
#define _LINUX_LIST_BL_H
#include <linux/list.h>
-#include <linux/spinlock.h>
#include <linux/bit_spinlock.h>
/*
@@ -33,22 +32,13 @@
struct hlist_bl_head {
struct hlist_bl_node *first;
-#ifdef CONFIG_PREEMPT_RT_BASE
- raw_spinlock_t lock;
-#endif
};
struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};
-
-static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
-{
- h->first = NULL;
-#ifdef CONFIG_PREEMPT_RT_BASE
- raw_spin_lock_init(&h->lock);
-#endif
-}
+#define INIT_HLIST_BL_HEAD(ptr) \
+ ((ptr)->first = NULL)
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
@@ -127,26 +117,12 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(0, (unsigned long *)b);
-#else
- raw_spin_lock(&b->lock);
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- __set_bit(0, (unsigned long *)b);
-#endif
-#endif
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
__bit_spin_unlock(0, (unsigned long *)b);
-#else
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- __clear_bit(0, (unsigned long *)b);
-#endif
- raw_spin_unlock(&b->lock);
-#endif
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
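
Back to a pure bit spinlock: bit 0 of 'first' is the lock, so all list updates must go through the lock helpers; a hypothetical bucket insert:

/* Hypothetical hash-bucket insert: bit 0 of head->first is the lock,
 * so the node list may only be touched between lock/unlock.
 */
static void bucket_add(struct hlist_bl_head *head, struct hlist_bl_node *n)
{
	hlist_bl_lock(head);
	hlist_bl_add_head(n, head);
	hlist_bl_unlock(head);
}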
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
deleted file mode 100644
index 21653e9..0000000
--- a/include/linux/locallock.h
+++ /dev/null
@@ -1,270 +0,0 @@
-#ifndef _LINUX_LOCALLOCK_H
-#define _LINUX_LOCALLOCK_H
-
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define LL_WARN(cond) WARN_ON(cond)
-#else
-# define LL_WARN(cond) do { } while (0)
-#endif
-
-/*
- * per cpu lock based substitute for local_irq_*()
- */
-struct local_irq_lock {
- spinlock_t lock;
- struct task_struct *owner;
- int nestcnt;
- unsigned long flags;
-};
-
-#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
- DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
- .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
-
-#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
- DECLARE_PER_CPU(struct local_irq_lock, lvar)
-
-#define local_irq_lock_init(lvar) \
- do { \
- int __cpu; \
- for_each_possible_cpu(__cpu) \
- spin_lock_init(&per_cpu(lvar, __cpu).lock); \
- } while (0)
-
-/*
- * spin_lock|trylock|unlock_local flavour that does not migrate disable
- * used for __local_lock|trylock|unlock where get_local_var/put_local_var
- * already takes care of the migrate_disable/enable
- * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define spin_lock_local(lock) rt_spin_lock(lock)
-# define spin_trylock_local(lock) rt_spin_trylock(lock)
-# define spin_unlock_local(lock) rt_spin_unlock(lock)
-#else
-# define spin_lock_local(lock) spin_lock(lock)
-# define spin_trylock_local(lock) spin_trylock(lock)
-# define spin_unlock_local(lock) spin_unlock(lock)
-#endif
-
-static inline void __local_lock(struct local_irq_lock *lv)
-{
- if (lv->owner != current) {
- spin_lock_local(&lv->lock);
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- }
- lv->nestcnt++;
-}
-
-#define local_lock(lvar) \
- do { __local_lock(&get_local_var(lvar)); } while (0)
-
-static inline int __local_trylock(struct local_irq_lock *lv)
-{
- if (lv->owner != current && spin_trylock_local(&lv->lock)) {
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- lv->nestcnt = 1;
- return 1;
- }
- return 0;
-}
-
-#define local_trylock(lvar) \
- ({ \
- int __locked; \
- __locked = __local_trylock(&get_local_var(lvar)); \
- if (!__locked) \
- put_local_var(lvar); \
- __locked; \
- })
-
-static inline void __local_unlock(struct local_irq_lock *lv)
-{
- LL_WARN(lv->nestcnt == 0);
- LL_WARN(lv->owner != current);
- if (--lv->nestcnt)
- return;
-
- lv->owner = NULL;
- spin_unlock_local(&lv->lock);
-}
-
-#define local_unlock(lvar) \
- do { \
- __local_unlock(&__get_cpu_var(lvar)); \
- put_local_var(lvar); \
- } while (0)
-
-static inline void __local_lock_irq(struct local_irq_lock *lv)
-{
- spin_lock_irqsave(&lv->lock, lv->flags);
- LL_WARN(lv->owner);
- LL_WARN(lv->nestcnt);
- lv->owner = current;
- lv->nestcnt = 1;
-}
-
-#define local_lock_irq(lvar) \
- do { __local_lock_irq(&get_local_var(lvar)); } while (0)
-
-#define local_lock_irq_on(lvar, cpu) \
- do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
-
-static inline void __local_unlock_irq(struct local_irq_lock *lv)
-{
- LL_WARN(!lv->nestcnt);
- LL_WARN(lv->owner != current);
- lv->owner = NULL;
- lv->nestcnt = 0;
- spin_unlock_irq(&lv->lock);
-}
-
-#define local_unlock_irq(lvar) \
- do { \
- __local_unlock_irq(&__get_cpu_var(lvar)); \
- put_local_var(lvar); \
- } while (0)
-
-#define local_unlock_irq_on(lvar, cpu) \
- do { \
- __local_unlock_irq(&per_cpu(lvar, cpu)); \
- } while (0)
-
-static inline int __local_lock_irqsave(struct local_irq_lock *lv)
-{
- if (lv->owner != current) {
- __local_lock_irq(lv);
- return 0;
- } else {
- lv->nestcnt++;
- return 1;
- }
-}
-
-#define local_lock_irqsave(lvar, _flags) \
- do { \
- if (__local_lock_irqsave(&get_local_var(lvar))) \
- put_local_var(lvar); \
- _flags = __get_cpu_var(lvar).flags; \
- } while (0)
-
-#define local_lock_irqsave_on(lvar, _flags, cpu) \
- do { \
- __local_lock_irqsave(&per_cpu(lvar, cpu)); \
- _flags = per_cpu(lvar, cpu).flags; \
- } while (0)
-
-static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
- unsigned long flags)
-{
- LL_WARN(!lv->nestcnt);
- LL_WARN(lv->owner != current);
- if (--lv->nestcnt)
- return 0;
-
- lv->owner = NULL;
- spin_unlock_irqrestore(&lv->lock, lv->flags);
- return 1;
-}
-
-#define local_unlock_irqrestore(lvar, flags) \
- do { \
- if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
- put_local_var(lvar); \
- } while (0)
-
-#define local_unlock_irqrestore_on(lvar, flags, cpu) \
- do { \
- __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
- } while (0)
-
-#define local_spin_trylock_irq(lvar, lock) \
- ({ \
- int __locked; \
- local_lock_irq(lvar); \
- __locked = spin_trylock(lock); \
- if (!__locked) \
- local_unlock_irq(lvar); \
- __locked; \
- })
-
-#define local_spin_lock_irq(lvar, lock) \
- do { \
- local_lock_irq(lvar); \
- spin_lock(lock); \
- } while (0)
-
-#define local_spin_unlock_irq(lvar, lock) \
- do { \
- spin_unlock(lock); \
- local_unlock_irq(lvar); \
- } while (0)
-
-#define local_spin_lock_irqsave(lvar, lock, flags) \
- do { \
- local_lock_irqsave(lvar, flags); \
- spin_lock(lock); \
- } while (0)
-
-#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- do { \
- spin_unlock(lock); \
- local_unlock_irqrestore(lvar, flags); \
- } while (0)
-
-#define get_locked_var(lvar, var) \
- (*({ \
- local_lock(lvar); \
- &__get_cpu_var(var); \
- }))
-
-#define put_locked_var(lvar, var) local_unlock(lvar);
-
-#define local_lock_cpu(lvar) \
- ({ \
- local_lock(lvar); \
- smp_processor_id(); \
- })
-
-#define local_unlock_cpu(lvar) local_unlock(lvar)
-
-#else /* PREEMPT_RT_BASE */
-
-#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
-#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
-
-static inline void local_irq_lock_init(int lvar) { }
-
-#define local_lock(lvar) preempt_disable()
-#define local_unlock(lvar) preempt_enable()
-#define local_lock_irq(lvar) local_irq_disable()
-#define local_unlock_irq(lvar) local_irq_enable()
-#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
-#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
-
-#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
-#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
-#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
-#define local_spin_lock_irqsave(lvar, lock, flags) \
- spin_lock_irqsave(lock, flags)
-#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- spin_unlock_irqrestore(lock, flags)
-
-#define get_locked_var(lvar, var) get_cpu_var(var)
-#define put_locked_var(lvar, var) put_cpu_var(var)
-
-#define local_lock_cpu(lvar) get_cpu()
-#define local_unlock_cpu(lvar) put_cpu()
-
-#endif
-
-#endif
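
After the removal, former call sites fall back to what the !RT mapping above expanded to; e.g. a local_lock_irqsave() section becomes a plain IRQ-save around per-cpu data (hypothetical state):

/* What a former local_lock_irqsave()/local_unlock_irqrestore() pair
 * reduces to after the revert (hypothetical per-cpu state):
 */
struct pcpu_state { int count; };
static DEFINE_PER_CPU(struct pcpu_state, pcpu_state);

static void bump_local(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__get_cpu_var(pcpu_state).count++;
	local_irq_restore(flags);
}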
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index da6716b..ccc1b71 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return 0;
+
+#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+ if (vma->vm_flags & VM_HUGETLB)
+ return 0;
+#endif
+
/*
* Migration allocates pages in the highest zone. If we cannot
* do so then migration (at least from node to node) is not
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ee8b14a..449905e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -5,7 +5,9 @@
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
-typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+typedef struct page *new_page_t(struct page *page, unsigned long private,
+ int **reason);
+typedef void free_page_t(struct page *page, unsigned long private);
/*
 * Return values from address_space_operations.migratepage():
@@ -39,7 +41,7 @@ extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
-extern int migrate_pages(struct list_head *l, new_page_t x,
+extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
extern int fail_migrate_page(struct address_space *,
@@ -61,8 +63,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping,
static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private, enum migrate_mode mode, int reason)
+static inline int migrate_pages(struct list_head *l, new_page_t new,
+ free_page_t free, unsigned long private, enum migrate_mode mode,
+ int reason)
{ return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
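
A sketch of the extended calling convention with a hypothetical allocation/free pair; the free callback runs only for pages the allocator produced but migration did not consume:

/* Hypothetical new/free callbacks for migrate_pages(). */
static struct page *alloc_target(struct page *page, unsigned long private,
				 int **reason)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

static void free_target(struct page *page, unsigned long private)
{
	__free_page(page);
}

/*
 * caller:
 *	migrate_pages(&pagelist, alloc_target, free_target, 0,
 *		      MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */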
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2acbab4..306f0d4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -919,6 +919,14 @@ extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
int shmem_zero_setup(struct vm_area_struct *);
+#ifdef CONFIG_SHMEM
+bool shmem_mapping(struct address_space *mapping);
+#else
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return false;
+}
+#endif
extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
@@ -1001,6 +1009,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
@@ -1252,59 +1261,27 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
-
#define __pte_lockptr(page) &((page)->ptl)
-
-static inline struct page *pte_lock_init(struct page *page)
-{
- spin_lock_init(__pte_lockptr(page));
- return page;
-}
-
+#define pte_lock_init(_page) do { \
+ spin_lock_init(__pte_lockptr(_page)); \
+} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
-
-#else /* !PREEMPT_RT_FULL */
-
-/*
- * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
- * page frame, hence it only has a pointer and we need to dynamically
- * allocate the lock when we allocate PTE-pages.
- *
- * This is an overall win, since only a small fraction of the pages
- * will be PTE pages under normal circumstances.
- */
-
-#define __pte_lockptr(page) ((page)->ptl)
-
-extern struct page *pte_lock_init(struct page *page);
-extern void pte_lock_deinit(struct page *page);
-
-#endif /* PREEMPT_RT_FULL */
-
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else /* !USE_SPLIT_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
-static inline struct page *pte_lock_init(struct page *page) { return page; }
+#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */
-static inline struct page *__pgtable_page_ctor(struct page *page)
+static inline void pgtable_page_ctor(struct page *page)
{
- page = pte_lock_init(page);
- if (page)
- inc_zone_page_state(page, NR_PAGETABLE);
- return page;
+ pte_lock_init(page);
+ inc_zone_page_state(page, NR_PAGETABLE);
}
-#define pgtable_page_ctor(page) \
-do { \
- page = __pgtable_page_ctor(page); \
-} while (0)
-
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
@@ -1655,9 +1632,6 @@ void page_cache_async_readahead(struct address_space *mapping,
unsigned long size);
unsigned long max_sane_readahead(unsigned long nr);
-unsigned long ra_submit(struct file_ra_state *ra,
- struct address_space *mapping,
- struct file *filp);
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -1668,7 +1642,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
- #define expand_upwards(vma, address) do { } while (0)
+ #define expand_upwards(vma, address) (0)
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
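
With the constructor void again, a pte-page allocation path needs no ctor failure handling; a sketch:

/* Sketch of a pte page allocation (cf. arch pte_alloc_one()): with
 * pgtable_page_ctor() void again there is no constructor error path.
 */
static struct page *sketch_alloc_pte_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (page)
		pgtable_page_ctor(page);
	return page;
}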
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d87823c..b8131e7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,7 +11,6 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
-#include <linux/rcupdate.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
@@ -143,11 +142,7 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTLOCKS
-# ifndef CONFIG_PREEMPT_RT_FULL
spinlock_t ptl;
-# else
- spinlock_t *ptl;
-# endif
#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
struct page *first_page; /* Compound tail pages */
@@ -329,9 +324,9 @@ struct mm_rss_stat {
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct * mmap; /* list of VMAs */
+ struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- struct vm_area_struct * mmap_cache; /* last find_vma result */
+ u32 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
@@ -449,9 +444,6 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head delayed_drop;
-#endif
};
/* first nid will either be a valid NID or one of these values */
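
vmacache_seqnum replaces the single mmap_cache pointer: each task caches a few recently used VMAs together with the sequence number they were valid for, and bumping the mm's counter invalidates every cache at once. A sketch of the validity test, assuming the matching task_struct fields from the same backport:

/* Sketch of the validity check (cf. mm/vmacache.c):
 * current->vmacache_seqnum is assumed to exist on the task side.
 */
static inline bool sketch_vmacache_valid(struct mm_struct *mm)
{
	return current->mm == mm &&
	       current->vmacache_seqnum == mm->vmacache_seqnum;
}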
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index bd791e4..450f19c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -75,9 +75,18 @@ enum {
extern int page_group_by_mobility_disabled;
-static inline int get_pageblock_migratetype(struct page *page)
+#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
+#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+
+#define get_pageblock_migratetype(page) \
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ PB_migrate_end, MIGRATETYPE_MASK)
+
+static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
- return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
+ BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
+ return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
+ MIGRATETYPE_MASK);
}
struct free_area {
@@ -134,6 +143,7 @@ enum zone_stat_item {
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_PAGES_SCANNED, /* pages scanned since last reclaim */
#ifdef CONFIG_NUMA
NUMA_HIT, /* allocated in intended node */
NUMA_MISS, /* allocated in non intended node */
@@ -312,19 +322,12 @@ enum zone_type {
#ifndef __GENERATING_BOUNDS_H
struct zone {
- /* Fields commonly accessed by the page allocator */
+ /* Read-mostly fields */
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long watermark[NR_WMARK];
/*
- * When free pages are below this point, additional steps are taken
- * when reading the number of free pages to avoid per-cpu counter
- * drift allowing watermarks to be breached
- */
- unsigned long percpu_drift_mark;
-
- /*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
* GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -332,40 +335,26 @@ struct zone {
* on the higher zones). This array is recalculated at runtime if the
* sysctl_lowmem_reserve_ratio sysctl changes.
*/
- unsigned long lowmem_reserve[MAX_NR_ZONES];
-
- /*
- * This is a per-zone reserve of pages that should not be
- * considered dirtyable memory.
- */
- unsigned long dirty_balance_reserve;
+ long lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
int node;
+#endif
+
/*
- * zone reclaim becomes active if more unmapped pages exist.
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+ * this zone's LRU. Maintained by the pageout code.
*/
- unsigned long min_unmapped_pages;
- unsigned long min_slab_pages;
-#endif
+ unsigned int inactive_ratio;
+
+ struct pglist_data *zone_pgdat;
struct per_cpu_pageset __percpu *pageset;
+
/*
- * free areas of different sizes
+ * This is a per-zone reserve of pages that should not be
+ * considered dirtyable memory.
*/
- spinlock_t lock;
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
- /* Set to true when the PG_migrate_skip bits should be cleared */
- bool compact_blockskip_flush;
-
- /* pfns where compaction scanners should start */
- unsigned long compact_cached_free_pfn;
- unsigned long compact_cached_migrate_pfn;
-#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
- /* see spanned/present_pages for more description */
- seqlock_t span_seqlock;
-#endif
- struct free_area free_area[MAX_ORDER];
+ unsigned long dirty_balance_reserve;
#ifndef CONFIG_SPARSEMEM
/*
@@ -375,71 +364,14 @@ struct zone {
unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_COMPACTION
- /*
- * On compaction failure, 1<<compact_defer_shift compactions
- * are skipped before trying again. The number attempted since
- * last failure is tracked with compact_considered.
- */
- unsigned int compact_considered;
- unsigned int compact_defer_shift;
- int compact_order_failed;
-#endif
-
- ZONE_PADDING(_pad1_)
-
- /* Fields commonly accessed by the page reclaim scanner */
- spinlock_t lru_lock;
- struct lruvec lruvec;
-
- unsigned long pages_scanned; /* since last reclaim */
- unsigned long flags; /* zone flags, see below */
-
- /* Zone statistics */
- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
-
- /*
- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
- * this zone's LRU. Maintained by the pageout code.
- */
- unsigned int inactive_ratio;
-
-
- ZONE_PADDING(_pad2_)
- /* Rarely used or read-mostly fields */
-
+#ifdef CONFIG_NUMA
/*
- * wait_table -- the array holding the hash table
- * wait_table_hash_nr_entries -- the size of the hash table array
- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
- *
- * The purpose of all these is to keep track of the people
- * waiting for a page to become available and make them
- * runnable again when possible. The trouble is that this
- * consumes a lot of space, especially when so few things
- * wait on pages at a given time. So instead of using
- * per-page waitqueues, we use a waitqueue hash table.
- *
- * The bucket discipline is to sleep on the same queue when
- * colliding and wake all in that wait queue when removing.
- * When something wakes, it must check to be sure its page is
- * truly available, a la thundering herd. The cost of a
- * collision is great, but given the expected load of the
- * table, they should be so rare as to be outweighed by the
- * benefits from the saved space.
- *
- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
- * primary users of these fields, and in mm/page_alloc.c
- * free_area_init_core() performs the initialization of them.
+ * zone reclaim becomes active if more unmapped pages exist.
*/
- wait_queue_head_t * wait_table;
- unsigned long wait_table_hash_nr_entries;
- unsigned long wait_table_bits;
+ unsigned long min_unmapped_pages;
+ unsigned long min_slab_pages;
+#endif /* CONFIG_NUMA */
- /*
- * Discontig memory support fields.
- */
- struct pglist_data *zone_pgdat;
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
unsigned long zone_start_pfn;
@@ -485,14 +417,103 @@ struct zone {
* adjust_managed_page_count() should be used instead of directly
* touching zone->managed_pages and totalram_pages.
*/
+ unsigned long managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
- unsigned long managed_pages;
+
+ const char *name;
/*
- * rarely used fields:
+	 * Number of MIGRATE_RESERVE page blocks, maintained purely as an
+	 * optimization. Protected by zone->lock.
*/
- const char *name;
+ int nr_migrate_reserve_block;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* see spanned/present_pages for more description */
+ seqlock_t span_seqlock;
+#endif
+
+ /*
+ * wait_table -- the array holding the hash table
+ * wait_table_hash_nr_entries -- the size of the hash table array
+ * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
+ *
+ * The purpose of all these is to keep track of the people
+ * waiting for a page to become available and make them
+ * runnable again when possible. The trouble is that this
+ * consumes a lot of space, especially when so few things
+ * wait on pages at a given time. So instead of using
+ * per-page waitqueues, we use a waitqueue hash table.
+ *
+ * The bucket discipline is to sleep on the same queue when
+ * colliding and wake all in that wait queue when removing.
+ * When something wakes, it must check to be sure its page is
+ * truly available, a la thundering herd. The cost of a
+ * collision is great, but given the expected load of the
+ * table, they should be so rare as to be outweighed by the
+ * benefits from the saved space.
+ *
+ * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
+ * primary users of these fields, and in mm/page_alloc.c
+ * free_area_init_core() performs the initialization of them.
+ */
+ wait_queue_head_t *wait_table;
+ unsigned long wait_table_hash_nr_entries;
+ unsigned long wait_table_bits;
+
+ ZONE_PADDING(_pad1_)
+
+ /* Write-intensive fields used from the page allocator */
+ spinlock_t lock;
+
+ /* free areas of different sizes */
+ struct free_area free_area[MAX_ORDER];
+
+ /* zone flags, see below */
+ unsigned long flags;
+
+ ZONE_PADDING(_pad2_)
+
+ /* Write-intensive fields used by page reclaim */
+
+ /* Fields commonly accessed by the page reclaim scanner */
+ spinlock_t lru_lock;
+ struct lruvec lruvec;
+
+ /*
+ * When free pages are below this point, additional steps are taken
+ * when reading the number of free pages to avoid per-cpu counter
+ * drift allowing watermarks to be breached
+ */
+ unsigned long percpu_drift_mark;
+
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* pfn where compaction free scanner should start */
+ unsigned long compact_cached_free_pfn;
+ /* pfn where async and sync compaction migration scanner should start */
+ unsigned long compact_cached_migrate_pfn[2];
+#endif
+
+#ifdef CONFIG_COMPACTION
+ /*
+ * On compaction failure, 1<<compact_defer_shift compactions
+ * are skipped before trying again. The number attempted since
+ * last failure is tracked with compact_considered.
+ */
+ unsigned int compact_considered;
+ unsigned int compact_defer_shift;
+ int compact_order_failed;
+#endif
+
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* Set to true when the PG_migrate_skip bits should be cleared */
+ bool compact_blockskip_flush;
+#endif
+
+ ZONE_PADDING(_pad3_)
+ /* Zone statistics */
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
typedef enum {
@@ -508,6 +529,7 @@ typedef enum {
ZONE_WRITEBACK, /* reclaim scanning has recently found
* many pages under writeback
*/
+ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
} zone_flags_t;
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -545,6 +567,11 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}
+static inline int zone_is_fair_depleted(const struct zone *zone)
+{
+ return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
+}
+
static inline int zone_is_oom_locked(const struct zone *zone)
{
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
@@ -799,10 +826,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int alloc_flags);
-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int alloc_flags);
+bool zone_watermark_ok(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags);
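
The pfn-based variant exists for hot paths that already know the pfn; a sketch:

/* Sketch: free paths already hold the pfn, so the pfn-based lookup
 * avoids the page_to_pfn() that the get_pageblock_migratetype()
 * wrapper would redo (cf. free_pcppages_bulk()).
 */
static int sketch_free_migratetype(struct page *page, unsigned long pfn)
{
	return get_pfnblock_migratetype(page, pfn);
}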
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
diff --git a/include/linux/module.h b/include/linux/module.h
index 05f2447..54aef1b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -143,7 +143,7 @@ extern const struct gtype##_id __mod_##gtype##_table \
#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
#define MODULE_DEVICE_TABLE(type,name) \
- MODULE_GENERIC_TABLE(type##_device,name)
+ MODULE_GENERIC_TABLE(type##__##name##_device, name)
/* Version of form [<epoch>:]<version>[-<extra-version>].
Or for CVS/RCS ID version, everything but the number is stripped.
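
Folding the table name into the alias lets one module declare several tables of the same type without symbol collisions; a hypothetical example:

/* Hypothetical: two OF match tables of the same type in one module now
 * emit distinct __mod_*_device_table aliases instead of colliding.
 */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static const struct of_device_id bar_of_match[] = {
	{ .compatible = "vendor,bar" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bar_of_match);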
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 38cd98f..22e5b96 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -42,11 +42,18 @@ struct mnt_namespace;
* flag, consider how it interacts with shared mounts.
*/
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
-#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
+#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
+ | MNT_READONLY)
+#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL 0x4000
+#define MNT_LOCK_ATIME 0x040000
+#define MNT_LOCK_NOEXEC 0x080000
+#define MNT_LOCK_NOSUID 0x100000
+#define MNT_LOCK_NODEV 0x200000
#define MNT_LOCK_READONLY 0x400000
#define MNT_LOCKED 0x800000
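
The MNT_LOCK_* bits record flags a mount namespace may not clear; a sketch of the kind of remount check they enable (cf. do_remount() in fs/namespace.c):

/* Sketch: a locked-readonly mount must stay readonly across remount. */
static bool sketch_remount_allowed(int mnt_flags, int new_flags)
{
	if ((mnt_flags & MNT_LOCK_READONLY) && !(new_flags & MNT_READONLY))
		return false;
	if ((mnt_flags & MNT_LOCK_NODEV) && !(new_flags & MNT_NODEV))
		return false;
	return true;
}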
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 79e172a..bab49da 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,17 +18,6 @@
#include <linux/atomic.h>
#include <asm/processor.h>
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/mutex_rt.h>
-#else
-
/*
* Simple, straightforward mutexes with strict semantics:
*
@@ -110,6 +99,13 @@ do { \
static inline void mutex_destroy(struct mutex *lock) {}
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -177,8 +173,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
-#endif /* !PREEMPT_RT_FULL */
-
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#ifndef arch_mutex_cpu_relax
diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
deleted file mode 100644
index c38a44b..0000000
--- a/include/linux/mutex_rt.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef __LINUX_MUTEX_RT_H
-#define __LINUX_MUTEX_RT_H
-
-#ifndef __LINUX_MUTEX_H
-#error "Please include mutex.h"
-#endif
-
-#include <linux/rtmutex.h>
-
-/* FIXME: Just for __lockfunc */
-#include <linux/spinlock.h>
-
-struct mutex {
- struct rt_mutex lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define __MUTEX_INITIALIZER(mutexname) \
- { \
- .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
- __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
- }
-
-#define DEFINE_MUTEX(mutexname) \
- struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-
-extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
-extern void __lockfunc _mutex_lock(struct mutex *lock);
-extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
-extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
-extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
-extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
-extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
-extern int __lockfunc _mutex_trylock(struct mutex *lock);
-extern void __lockfunc _mutex_unlock(struct mutex *lock);
-
-#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
-#define mutex_lock(l) _mutex_lock(l)
-#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
-#define mutex_lock_killable(l) _mutex_lock_killable(l)
-#define mutex_trylock(l) _mutex_trylock(l)
-#define mutex_unlock(l) _mutex_unlock(l)
-#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
-# define mutex_lock_interruptible_nested(l, s) \
- _mutex_lock_interruptible_nested(l, s)
-# define mutex_lock_killable_nested(l, s) \
- _mutex_lock_killable_nested(l, s)
-
-# define mutex_lock_nest_lock(lock, nest_lock) \
-do { \
- typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
- _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
-} while (0)
-
-#else
-# define mutex_lock_nested(l, s) _mutex_lock(l)
-# define mutex_lock_interruptible_nested(l, s) \
- _mutex_lock_interruptible(l)
-# define mutex_lock_killable_nested(l, s) \
- _mutex_lock_killable(l)
-# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-#endif
-
-# define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(mutex)->lock); \
- __mutex_do_init((mutex), #mutex, &__key); \
-} while (0)
-
-# define __mutex_init(mutex, name, key) \
-do { \
- rt_mutex_init(&(mutex)->lock); \
- __mutex_do_init((mutex), name, key); \
-} while (0)
-
-#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1f48a4e..51bfd7a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1214,7 +1214,7 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
- unsigned short neigh_priv_len;
+ unsigned char neigh_priv_len;
unsigned short dev_id; /* Used to differentiate devices
* that share the same link
* layer address
@@ -1930,7 +1930,6 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
- struct sk_buff_head tofree_queue;
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
@@ -2893,6 +2892,20 @@ extern const char *netdev_drivername(const struct net_device *dev);
extern void linkwatch_run_queue(void);
+static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
+ netdev_features_t f2)
+{
+ if (f1 & NETIF_F_GEN_CSUM)
+ f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+ if (f2 & NETIF_F_GEN_CSUM)
+ f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+ f1 &= f2;
+ if (f1 & NETIF_F_GEN_CSUM)
+ f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+
+ return f1;
+}
+
static inline netdev_features_t netdev_get_wanted_features(
struct net_device *dev)
{
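
A plausible caller for stacked devices; the NETIF_F_GEN_CSUM juggling keeps a generic-checksum device from losing protocol-specific checksum bits during the intersection:

/* Hypothetical stacked-device use: intersect the lower device's
 * features with what the upper layer supports.
 */
static netdev_features_t stacked_features(struct net_device *lower,
					  netdev_features_t upper_features)
{
	return netdev_intersect_features(lower->features, upper_features);
}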
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 7d083af..dd49566 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,7 +3,6 @@
#include <linux/netdevice.h>
-#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
/**
@@ -285,8 +284,6 @@ extern void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
-DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
-
/**
* xt_write_recseq_begin - start of a write section
*
@@ -301,9 +298,6 @@ static inline unsigned int xt_write_recseq_begin(void)
{
unsigned int addend;
- /* RT protection */
- local_lock(xt_write_lock);
-
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
@@ -334,7 +328,6 @@ static inline void xt_write_recseq_end(unsigned int addend)
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
- local_unlock(xt_write_lock);
}
/*
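
Usage pattern on the packet path now that the RT local lock is gone (cf. ipt_do_table()); BH disablement is the only protection needed around the per-cpu seqcount:

/* Sketch of the packet-path pattern. */
static void sketch_traverse_rules(void)
{
	unsigned int addend;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	/* ... walk the ruleset, update per-cpu counters ... */
	xt_write_recseq_end(addend);
	local_bh_enable();
}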
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 7a6c396..8b50a62 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
}
enum netlink_skb_flags {
- NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
- NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
- NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
+ NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
+ NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
+ NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
+ NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
};
struct netlink_skb_parms {
@@ -171,4 +172,11 @@ extern int netlink_add_tap(struct netlink_tap *nt);
extern int __netlink_remove_tap(struct netlink_tap *nt);
extern int netlink_remove_tap(struct netlink_tap *nt);
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+ struct user_namespace *ns, int cap);
+bool netlink_ns_capable(const struct sk_buff *skb,
+ struct user_namespace *ns, int cap);
+bool netlink_capable(const struct sk_buff *skb, int cap);
+bool netlink_net_capable(const struct sk_buff *skb, int cap);
+
#endif /* __LINUX_NETLINK_H */
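
A typical check in a request handler: capabilities are judged against the socket that sent the skb, not against current, which closes the hole where an unprivileged opener relays privileged requests:

/* Hypothetical doit handler using the skb-based capability check. */
static int sketch_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;
	/* ... perform the privileged operation ... */
	return 0;
}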
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2b30701..715671e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1223,11 +1223,22 @@ struct nfs41_free_stateid_res {
unsigned int status;
};
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+ kfree(cinfo->buckets);
+}
+
#else
struct pnfs_ds_commit_info {
};
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
+
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 2e4414a..d14a4c3 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
*/
-
+
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
@@ -42,7 +42,9 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed.
+ * often but notifier_blocks will seldom be removed. Also, SRCU notifier
+ * chains are slightly more difficult to use because they require special
+ * runtime initialization.
*/
typedef int (*notifier_fn_t)(struct notifier_block *nb,
@@ -86,7 +88,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be cleaned up dynamically */
+/* srcu_notifier_heads must be initialized and cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -99,13 +101,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-
-#define SRCU_NOTIFIER_INIT(name, pcpu) \
- { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
- }
+/* srcu_notifier_heads cannot be initialized statically */
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -117,18 +113,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
-#define _SRCU_NOTIFIER_HEAD(name, mod) \
- static DEFINE_PER_CPU(struct srcu_struct_array, \
- name##_head_srcu_array); \
- mod struct srcu_notifier_head name = \
- SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
-
-#define SRCU_NOTIFIER_HEAD(name) \
- _SRCU_NOTIFIER_HEAD(name, )
-
-#define SRCU_NOTIFIER_HEAD_STATIC(name) \
- _SRCU_NOTIFIER_HEAD(name, static)
-
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
@@ -198,12 +182,12 @@ static inline int notifier_to_errno(int ret)
/*
* Declared notifiers so far. I can imagine quite a few more chains
- * over time (eg laptop power reset chains, reboot chain (to clean
+ * over time (eg laptop power reset chains, reboot chain (to clean
* device units up), device [un]mount chain, module load/unload chain,
- * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
*/
-
+
/* CPU notifiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
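
Without a static initializer, SRCU heads must be set up at runtime before first use; e.g.:

/* Hypothetical module using a runtime-initialized SRCU chain. */
static struct srcu_notifier_head example_chain;

static int __init example_init(void)
{
	srcu_init_notifier_head(&example_chain);
	/* then: srcu_notifier_chain_register(&example_chain, &my_nb); */
	return 0;
}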
diff --git a/include/linux/of.h b/include/linux/of.h
index f95aee3..9007c86 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -252,14 +252,12 @@ extern int of_property_read_u64(const struct device_node *np,
extern int of_property_read_string(struct device_node *np,
const char *propname,
const char **out_string);
-extern int of_property_read_string_index(struct device_node *np,
- const char *propname,
- int index, const char **output);
extern int of_property_match_string(struct device_node *np,
const char *propname,
const char *string);
-extern int of_property_count_strings(struct device_node *np,
- const char *propname);
+extern int of_property_read_string_helper(struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
extern int of_device_is_available(const struct device_node *device);
@@ -434,15 +432,9 @@ static inline int of_property_read_string(struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_string_index(struct device_node *np,
- const char *propname, int index,
- const char **out_string)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_count_strings(struct device_node *np,
- const char *propname)
+static inline int of_property_read_string_helper(struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index)
{
return -ENOSYS;
}
@@ -544,6 +536,70 @@ static inline int of_node_to_nid(struct device_node *np)
#endif
/**
+ * of_property_read_string_array() - Read an array of strings from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_strs: output array of string pointers.
+ * @sz: number of array elements to read.
+ *
+ * Search for a property in a device tree node and retrieve a list of
+ * null-terminated string values (pointer to data, not a copy) in that
+ * property.
+ *
+ * If @out_strs is NULL, the number of strings in the property is returned.
+ */
+static inline int of_property_read_string_array(struct device_node *np,
+ const char *propname, const char **out_strs,
+ size_t sz)
+{
+ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
+}
+
+/**
+ * of_property_count_strings() - Find and return the number of strings from a
+ * multiple strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device tree node and retrieve the number of
+ * null-terminated strings contained in it. Returns the number of strings on
+ * success, -EINVAL if the property does not exist, -ENODATA if the property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ */
+static inline int of_property_count_strings(struct device_node *np,
+ const char *propname)
+{
+ return of_property_read_string_helper(np, propname, NULL, 0, 0);
+}
+
+/**
+ * of_property_read_string_index() - Find and read a string from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the string in the list of strings
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy) in the list of strings
+ * contained in that property.
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * the property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+static inline int of_property_read_string_index(struct device_node *np,
+ const char *propname,
+ int index, const char **output)
+{
+ int rc = of_property_read_string_helper(np, propname, output, 1, index);
+ return rc < 0 ? rc : 0;
+}
+
+/**
 * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
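
Hypothetical device-tree usage of the wrappers above:

/* Hypothetical caller, for a node with:
 *	clock-output-names = "core", "bus";
 */
static void sketch_read_names(struct device_node *np)
{
	const char *name;
	int count = of_property_count_strings(np, "clock-output-names");

	if (count == 2 &&
	    !of_property_read_string_index(np, "clock-output-names", 1, &name))
		pr_info("second clock: %s\n", name);	/* prints "bus" */
}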
diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007..297cda5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
+
+extern int oom_kills_count(void);
+extern void note_oom_kill(void);
extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
unsigned int points, unsigned long totalpages,
struct mem_cgroup *memcg, nodemask_t *nodemask,
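
The counter pair is presumably meant for suspend: snapshot oom_kills_count() before freezing and re-check it afterwards, so an OOM kill that raced with suspend is not lost; a sketch:

/* Sketch of the intended pairing: the OOM killer calls note_oom_kill(),
 * and the freezer aborts if the count moved while it was freezing.
 */
static int sketch_freeze_check(void)
{
	int saved = oom_kills_count();

	/* ... freeze user space ... */

	if (oom_kills_count() != saved)
		return -EBUSY;	/* a task was OOM-killed meanwhile */
	return 0;
}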
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6d53675..2284ea6 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,6 +198,7 @@ struct page; /* forward declaration */
TESTPAGEFLAG(Locked, locked)
PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+ __SETPAGEFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
@@ -208,6 +209,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
PAGEFLAG(SavePinned, savepinned); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
+ __SETPAGEFLAG(SwapBacked, swapbacked)
__PAGEFLAG(SlobFree, slob_free)
@@ -228,9 +230,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
#ifdef CONFIG_HIGHMEM
/*
@@ -317,13 +319,23 @@ CLEARPAGEFLAG(Uptodate, uptodate)
extern void cancel_dirty_page(struct page *page, unsigned int account_size);
int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
+int __test_set_page_writeback(struct page *page, bool keep_write);
+
+#define test_set_page_writeback(page) \
+ __test_set_page_writeback(page, false)
+#define test_set_page_writeback_keepwrite(page) \
+ __test_set_page_writeback(page, true)
static inline void set_page_writeback(struct page *page)
{
test_set_page_writeback(page);
}
+static inline void set_page_writeback_keepwrite(struct page *page)
+{
+ test_set_page_writeback_keepwrite(page);
+}
+
#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
* System with lots of page flags available. This allows separate
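To make the keepwrite variant concrete, a hedged sketch of a filesystem writepage path follows. The condition is illustrative; the point (modeled on how ext4 came to use this API) is that passing keep_write = true sets PG_writeback without clearing the radix-tree to-write tag, so a page that stays partially dirty is still found by a later tagged writeback pass.

    static void example_start_writeback(struct page *page, bool fully_written)
    {
            if (fully_written)
                    set_page_writeback(page);
            else
                    set_page_writeback_keepwrite(page);
    }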
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index ca67e80..777a524 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -24,9 +24,6 @@ enum {
*/
struct page_cgroup {
unsigned long flags;
-#ifdef CONFIG_PREEMPT_RT_BASE
- spinlock_t pcg_lock;
-#endif
struct mem_cgroup *mem_cgroup;
};
@@ -77,20 +74,12 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
* Don't take this lock in IRQ context.
* This lock is for pc->mem_cgroup, USED, MIGRATION
*/
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(PCG_LOCK, &pc->flags);
-#else
- spin_lock(&pc->pcg_lock);
-#endif
}
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
-#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(PCG_LOCK, &pc->flags);
-#else
- spin_unlock(&pc->pcg_lock);
-#endif
}
#else /* CONFIG_MEMCG */
@@ -113,10 +102,6 @@ static inline void __init page_cgroup_init_flatmem(void)
{
}
-static inline void page_cgroup_lock_init(struct page_cgroup *pc)
-{
-}
-
#endif /* CONFIG_MEMCG */
#include <linux/swap.h>
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 2ee8cd2..2baeee1 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -30,9 +30,12 @@ enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + 3 - 1,
/* 3 bits required for migrate types */
-#ifdef CONFIG_COMPACTION
PB_migrate_skip,/* If set the block is skipped by compaction */
-#endif /* CONFIG_COMPACTION */
+
+ /*
+ * Assume the bits will always align on a word. If this assumption
+ * changes then get/set pageblock needs updating.
+ */
NR_PAGEBLOCK_BITS
};
@@ -62,11 +65,26 @@ extern int pageblock_order;
/* Forward declaration */
struct page;
+unsigned long get_pfnblock_flags_mask(struct page *page,
+ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask);
+
+void set_pfnblock_flags_mask(struct page *page,
+ unsigned long flags,
+ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask);
+
/* Declarations for getting and setting flags. See mm/page_alloc.c */
-unsigned long get_pageblock_flags_group(struct page *page,
- int start_bitidx, int end_bitidx);
-void set_pageblock_flags_group(struct page *page, unsigned long flags,
- int start_bitidx, int end_bitidx);
+#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ end_bitidx, \
+ (1 << (end_bitidx - start_bitidx + 1)) - 1)
+#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
+ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
+ end_bitidx, \
+ (1 << (end_bitidx - start_bitidx + 1)) - 1)
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
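The macro rewrite above folds the old start/end bit-index pair into a single contiguous mask. As a quick worked example using the enum at the top of this file, the three migrate-type bits run from PB_migrate (0) to PB_migrate_end (2), so the mask evaluates to (1 << 3) - 1 = 0x7:

    /* Sketch: read the migrate type of a pageblock via the new API. */
    unsigned long mt;

    mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
                                 PB_migrate_end,
                                 (1 << (PB_migrate_end - PB_migrate + 1)) - 1);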
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e3dea75..bf944e8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -99,7 +99,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
#define page_cache_get(page) get_page(page)
#define page_cache_release(page) put_page(page)
-void release_pages(struct page **pages, int nr, int cold);
+void release_pages(struct page **pages, int nr, bool cold);
/*
* speculatively take a reference to a page.
@@ -243,12 +243,116 @@ static inline struct page *page_cache_alloc_readahead(struct address_space *x)
typedef int filler_t(void *, struct page *);
-extern struct page * find_get_page(struct address_space *mapping,
- pgoff_t index);
-extern struct page * find_lock_page(struct address_space *mapping,
- pgoff_t index);
-extern struct page * find_or_create_page(struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+pgoff_t page_cache_prev_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+
+#define FGP_ACCESSED 0x00000001
+#define FGP_LOCK 0x00000002
+#define FGP_CREAT 0x00000004
+#define FGP_WRITE 0x00000008
+#define FGP_NOFS 0x00000010
+#define FGP_NOWAIT 0x00000020
+
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+ int fgp_flags, gfp_t cache_gfp_mask);
+
+/**
+ * find_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned with an increased refcount.
+ *
+ * Otherwise, %NULL is returned.
+ */
+static inline struct page *find_get_page(struct address_space *mapping,
+ pgoff_t offset)
+{
+ return pagecache_get_page(mapping, offset, 0, 0);
+}
+
+static inline struct page *find_get_page_flags(struct address_space *mapping,
+ pgoff_t offset, int fgp_flags)
+{
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
+}
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * Otherwise, %NULL is returned.
+ *
+ * find_lock_page() may sleep.
+ */
+static inline struct page *find_lock_page(struct address_space *mapping,
+ pgoff_t offset)
+{
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+}
+
+/**
+ * find_or_create_page - locate or add a pagecache page
+ * @mapping: the page's address_space
+ * @offset: the page's index into the mapping
+ * @gfp_mask: page allocation mode
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * If the page is not present, a new page is allocated using @gfp_mask
+ * and added to the page cache and the VM's LRU list. The page is
+ * returned locked and with an increased refcount.
+ *
+ * On memory exhaustion, %NULL is returned.
+ *
+ * find_or_create_page() may sleep, even if @gfp_mask specifies an
+ * atomic allocation!
+ */
+static inline struct page *find_or_create_page(struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask)
+{
+ return pagecache_get_page(mapping, offset,
+ FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
+ gfp_mask);
+}
+
+/**
+ * grab_cache_page_nowait - returns locked page at given index in given cache
+ * @mapping: target address_space
+ * @index: the page index
+ *
+ * Same as grab_cache_page(), but does not wait if the page is unavailable.
+ * This is intended for speculative data generators, where the data can
+ * be regenerated if the page couldn't be grabbed. This routine should
+ * be safe to call while holding the lock for another page.
+ *
+ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
+ * and deadlock against the caller's locked page.
+ */
+static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
+ pgoff_t index)
+{
+ return pagecache_get_page(mapping, index,
+ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+ mapping_gfp_mask(mapping));
+}
+
+struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_entries, struct page **entries,
+ pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
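With the old find_* entry points reduced to wrappers, callers can also combine FGP flags directly. A minimal sketch, equivalent to find_or_create_page() above (variable names are illustrative):

    struct page *page;

    page = pagecache_get_page(mapping, index,
                              FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                              mapping_gfp_mask(mapping));
    if (!page)
            return -ENOMEM;         /* allocation or insertion failed */
    /* ... operate on the locked page ... */
    unlock_page(page);
    page_cache_release(page);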
@@ -268,10 +372,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
-extern struct page * grab_cache_page_nowait(struct address_space *mapping,
- pgoff_t index);
-extern struct page * read_cache_page_async(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page(struct address_space *mapping,
pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
@@ -279,14 +379,6 @@ extern struct page * read_cache_page_gfp(struct address_space *mapping,
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
-static inline struct page *read_mapping_page_async(
- struct address_space *mapping,
- pgoff_t index, void *data)
-{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
- return read_cache_page_async(mapping, index, filler, data);
-}
-
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
{
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index e4dbfab..b45d391 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -22,6 +22,11 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t start, unsigned nr_entries,
+ pgoff_t *indices);
+void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index da172f9..573c049 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -323,6 +323,7 @@ struct pci_dev {
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
+ unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
@@ -478,6 +479,15 @@ static inline bool pci_is_root_bus(struct pci_bus *pbus)
return !(pbus->parent);
}
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+ dev = pci_physfn(dev);
+ if (pci_is_root_bus(dev->bus))
+ return NULL;
+
+ return dev->bus->self;
+}
+
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
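The new pci_upstream_bridge() helper composes naturally into a loop that walks toward the root complex. A hedged sketch (the log message is illustrative); the loop terminates because the helper returns NULL once the device sits on the root bus:

    #include <linux/pci.h>

    static void example_walk_upstream(struct pci_dev *dev)
    {
            struct pci_dev *bridge;

            for (bridge = pci_upstream_bridge(dev); bridge;
                 bridge = pci_upstream_bridge(bridge))
                    dev_info(&bridge->dev, "upstream bridge\n");
    }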
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 97fbecd..057c1d8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2551,6 +2551,7 @@
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 95961f0..0afb48f 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
pcpu_count = ACCESS_ONCE(ref->pcpu_count);
if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
- __this_cpu_inc(*pcpu_count);
+ this_cpu_inc(*pcpu_count);
else
atomic_inc(&ref->count);
@@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
pcpu_count = ACCESS_ONCE(ref->pcpu_count);
if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
- __this_cpu_inc(*pcpu_count);
+ this_cpu_inc(*pcpu_count);
ret = true;
}
@@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
pcpu_count = ACCESS_ONCE(ref->pcpu_count);
if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
- __this_cpu_dec(*pcpu_count);
+ this_cpu_dec(*pcpu_count);
else if (unlikely(atomic_dec_and_test(&ref->count)))
ref->release(ref);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f05adf5..c74088a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,31 +48,6 @@
preempt_enable(); \
} while (0)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define get_local_var(var) get_cpu_var(var)
-# define put_local_var(var) put_cpu_var(var)
-# define get_local_ptr(var) get_cpu_ptr(var)
-# define put_local_ptr(var) put_cpu_ptr(var)
-#else
-# define get_local_var(var) (*({ \
- migrate_disable(); \
- &__get_cpu_var(var); }))
-
-# define put_local_var(var) do { \
- (void)&(var); \
- migrate_enable(); \
-} while (0)
-
-# define get_local_ptr(var) ({ \
- migrate_disable(); \
- this_cpu_ptr(var); })
-
-# define put_local_ptr(var) do { \
- (void)(var); \
- migrate_enable(); \
-} while (0)
-#endif
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 2cc64b7..23705a5 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,7 +2,6 @@
#define _LINUX_PID_H
#include <linux/rcupdate.h>
-#include <linux/atomic.h>
enum pid_type
{
diff --git a/include/linux/plist.h b/include/linux/plist.h
index aa0fb39..8b6c970 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -98,6 +98,13 @@ struct plist_node {
}
/**
+ * PLIST_HEAD - declare and init plist_head
+ * @head: name for struct plist_head variable
+ */
+#define PLIST_HEAD(head) \
+ struct plist_head head = PLIST_HEAD_INIT(head)
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -134,6 +141,8 @@ static inline void plist_node_init(struct plist_node *node, int prio)
extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);
+extern void plist_requeue(struct plist_node *node, struct plist_head *head);
+
/**
* plist_for_each - iterate over the plist
* @pos: the type * to use as a loop counter
@@ -143,6 +152,16 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
list_for_each_entry(pos, &(head)->node_list, node_list)
/**
+ * plist_for_each_continue - continue iteration over the plist
+ * @pos: the type * to use as a loop cursor
+ * @head: the head for your list
+ *
+ * Continue to iterate over plist, continuing after the current position.
+ */
+#define plist_for_each_continue(pos, head) \
+ list_for_each_entry_continue(pos, &(head)->node_list, node_list)
+
+/**
* plist_for_each_safe - iterate safely over a plist of given type
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
@@ -163,6 +182,18 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
list_for_each_entry(pos, &(head)->node_list, mem.node_list)
/**
+ * plist_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor
+ * @head: the head for your list
+ * @m: the name of the list_struct within the struct
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define plist_for_each_entry_continue(pos, head, m) \
+ list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
+
+/**
* plist_for_each_entry_safe - iterate safely over list of given type
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
@@ -229,6 +260,20 @@ static inline int plist_node_empty(const struct plist_node *node)
#endif
/**
+ * plist_next - get the next entry in list
+ * @pos: the type * to cursor
+ */
+#define plist_next(pos) \
+ list_next_entry(pos, node_list)
+
+/**
+ * plist_prev - get the prev entry in list
+ * @pos: the type * to cursor
+ */
+#define plist_prev(pos) \
+ list_prev_entry(pos, node_list)
+
+/**
* plist_first - return the first node (and thus, highest priority)
* @head: the &struct plist_head pointer
*
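A short sketch of the entry iterators; struct item and its fields are illustrative. plist_requeue(), declared above, moves a node behind its equal-priority peers, which is what round-robin consumers of a priority list need:

    struct item {
            struct plist_node node;
            int payload;
    };

    static void example_scan(struct plist_head *head)
    {
            struct item *it;

            plist_for_each_entry(it, head, node)
                    pr_info("prio %d payload %d\n", it->node.prio, it->payload);
    }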
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index c153cf2..f5d4723 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -23,38 +23,15 @@
#define preempt_count() (current_thread_info()->preempt_count)
-#ifdef CONFIG_PREEMPT_LAZY
-#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-#else
-#define add_preempt_lazy_count(val) do { } while (0)
-#define sub_preempt_lazy_count(val) do { } while (0)
-#define inc_preempt_lazy_count() do { } while (0)
-#define dec_preempt_lazy_count() do { } while (0)
-#define preempt_lazy_count() (0)
-#endif
-
#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void);
-# ifdef CONFIG_PREEMPT_LAZY
#define preempt_check_resched() \
do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \
- test_thread_flag(TIF_NEED_RESCHED_LAZY))) \
- preempt_schedule(); \
-} while (0)
-# else
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
-# endif
#ifdef CONFIG_CONTEXT_TRACKING
@@ -87,36 +64,17 @@ do { \
barrier(); \
} while (0)
-#define preempt_lazy_disable() \
-do { \
- inc_preempt_lazy_count(); \
- barrier(); \
-} while (0)
-
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
-#ifndef CONFIG_PREEMPT_RT_BASE
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() barrier()
-#else
-# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() preempt_check_resched()
-#endif
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#define preempt_enable() \
do { \
- sched_preempt_enable_no_resched(); \
- barrier(); \
- preempt_check_resched(); \
-} while (0)
-
-#define preempt_lazy_enable() \
-do { \
- dec_preempt_lazy_count(); \
+ preempt_enable_no_resched(); \
barrier(); \
preempt_check_resched(); \
} while (0)
@@ -165,31 +123,9 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
-#define preempt_check_resched_rt() barrier()
#endif /* CONFIG_PREEMPT_COUNT */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define preempt_disable_rt() preempt_disable()
-# define preempt_enable_rt() preempt_enable()
-# define preempt_disable_nort() barrier()
-# define preempt_enable_nort() barrier()
-# ifdef CONFIG_SMP
- extern void migrate_disable(void);
- extern void migrate_enable(void);
-# else /* CONFIG_SMP */
-# define migrate_disable() barrier()
-# define migrate_enable() barrier()
-# endif /* CONFIG_SMP */
-#else
-# define preempt_disable_rt() barrier()
-# define preempt_enable_rt() barrier()
-# define preempt_disable_nort() preempt_disable()
-# define preempt_enable_nort() preempt_enable()
-# define migrate_disable() preempt_disable()
-# define migrate_enable() preempt_enable()
-#endif
-
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index 199f278..931bc61 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -58,11 +58,7 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
@@ -75,15 +71,9 @@
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#else
-# define softirq_count() (0UL)
-extern int in_serving_softirq(void);
-#endif
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -94,6 +84,7 @@ extern int in_serving_softirq(void);
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
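With the RT special cases gone, every context test above reduces to bit tests on a single preempt_count() read. A sketch of typical use (the deferral path is illustrative):

    if (in_interrupt())
            schedule_work(&my_work);        /* hardirq/softirq/NMI: must not sleep */
    else
            do_blocking_work();             /* process context: may sleep */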
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c262485..1864d94 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,11 +101,9 @@ int no_printk(const char *fmt, ...)
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
void early_vprintk(const char *fmt, va_list ap);
-extern void printk_kill(void);
#else
static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
-static inline void printk_kill(void) { }
#endif
#ifdef CONFIG_PRINTK
@@ -126,9 +124,9 @@ asmlinkage __printf(1, 2) __cold
int printk(const char *fmt, ...);
/*
- * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
*/
-__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -139,6 +137,7 @@ extern int __printk_ratelimit(const char *func);
#define printk_ratelimit() __printk_ratelimit(__func__)
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
+
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
@@ -162,7 +161,7 @@ int printk(const char *s, ...)
return 0;
}
static inline __printf(1, 2) __cold
-int printk_sched(const char *s, ...)
+int printk_deferred(const char *s, ...)
{
return 0;
}
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975..4af3fdc 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
};
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info);
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+ unsigned int memtype);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
struct ramoops_platform_data {
unsigned long mem_size;
unsigned long mem_address;
+ unsigned int mem_type;
unsigned long record_size;
unsigned long console_size;
unsigned long ftrace_size;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 07d0df6..cc79eff 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -5,6 +5,7 @@
#include <linux/sched.h> /* For struct task_struct. */
#include <linux/err.h> /* for IS_ERR_VALUE */
#include <linux/bug.h> /* For BUG_ON. */
+#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
/*
@@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message)
}
/**
+ * ptrace_event_pid - possibly stop for a ptrace event notification
+ * @event: %PTRACE_EVENT_* value to report
+ * @pid: process identifier for %PTRACE_GETEVENTMSG to return
+ *
+ * Check whether @event is enabled and, if so, report @event and @pid
+ * to the ptrace parent. @pid is reported as the pid_t seen from the
+ * ptrace parent's pid namespace.
+ *
+ * Called without locks.
+ */
+static inline void ptrace_event_pid(int event, struct pid *pid)
+{
+ /*
+ * FIXME: There's a potential race if a ptracer in a different pid
+ * namespace than the parent attaches between computing the message below and
+ * when we acquire tasklist_lock in ptrace_stop(). If this happens,
+ * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
+ */
+ unsigned long message = 0;
+ struct pid_namespace *ns;
+
+ rcu_read_lock();
+ ns = task_active_pid_ns(rcu_dereference(current->parent));
+ if (ns)
+ message = pid_nr_ns(pid, ns);
+ rcu_read_unlock();
+
+ ptrace_event(event, message);
+}
+
+/**
* ptrace_init_task - initialize ptrace state for a new child
* @child: new child task
* @ptrace: true if child should be ptrace'd by parent's tracer
@@ -302,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
* calling arch_ptrace_stop() when it would be superfluous. For example,
* if the thread has not been back to user mode since the last stop, the
* thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
*/
#define arch_ptrace_stop_needed(code, info) (0)
#endif
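For context, the helper is intended for event-reporting sites such as the fork path. A hedged sketch, paraphrasing rather than quoting the caller:

    /* After a successful fork, report the child's struct pid; the
     * helper translates it into the tracer's pid namespace. */
    if (unlikely(trace))
            ptrace_event_pid(trace, pid);   /* e.g. trace == PTRACE_EVENT_FORK */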
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 6965fe3..1d3eee5 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -46,6 +46,14 @@ void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
void dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
+static inline struct dquot *dqgrab(struct dquot *dquot)
+{
+ /* Make sure someone else holds an active reference to the dquot */
+ WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
+ WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+ atomic_inc(&dquot->dq_count);
+ return dquot;
+}
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
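The split between the two reference helpers, as a sketch: dqget() performs a lookup and may block, while dqgrab() only bumps the count on a dquot the caller already knows is active (the WARN_ON_ONCE checks above enforce that), so it suits paths that cannot afford the lookup:

    /* Caller already holds an active reference on @dquot. */
    struct dquot *ref = dqgrab(dquot);
    /* ... hand @ref to deferred work ... */
    dqput(ref);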
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5b6d5b2..e8be53e 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -219,6 +219,7 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
+void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
@@ -226,17 +227,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan);
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan);
-#ifndef CONFIG_PREEMPT_RT_FULL
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
-#else
-static inline int radix_tree_preload(gfp_t gm) { return 0; }
-static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
-#endif
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
@@ -261,7 +253,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
- preempt_enable_nort();
+ preempt_enable();
}
/**
diff --git a/include/linux/random.h b/include/linux/random.h
index de4894a..bf9085e 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -12,7 +12,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8b2693d..f1f1bc3 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -128,9 +128,6 @@ extern void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RT_FULL
-#define call_rcu_bh call_rcu
-#else
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -154,7 +151,6 @@ extern void call_rcu(struct rcu_head *head,
*/
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
-#endif
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@@ -194,11 +190,6 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-#ifndef CONFIG_PREEMPT_RT_FULL
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-#else
-static inline int sched_rcu_preempt_depth(void) { return 0; }
-#endif
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -222,8 +213,6 @@ static inline int rcu_preempt_depth(void)
return 0;
}
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
@@ -373,14 +362,7 @@ static inline int rcu_read_lock_held(void)
* rcu_read_lock_bh_held() is defined out of line to avoid #include-file
* hell.
*/
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline int rcu_read_lock_bh_held(void)
-{
- return rcu_read_lock_held();
-}
-#else
extern int rcu_read_lock_bh_held(void);
-#endif
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@@ -837,14 +819,10 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
-#ifdef CONFIG_PREEMPT_RT_FULL
- rcu_read_lock();
-#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"rcu_read_lock_bh() used illegally while idle");
-#endif
}
/*
@@ -854,14 +832,10 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- rcu_read_unlock();
-#else
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
-#endif
local_bh_enable();
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 91333de..226169d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -44,11 +44,7 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(cpu);
}
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define synchronize_rcu_bh synchronize_rcu
-#else
extern void synchronize_rcu_bh(void);
-#endif
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
@@ -76,19 +72,17 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define rcu_barrier_bh rcu_barrier
-#else
extern void rcu_barrier_bh(void);
-#endif
extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
+extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
extern void exit_rcu(void);
@@ -96,12 +90,4 @@ extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
-#ifndef CONFIG_PREEMPT_RT_FULL
-extern void rcu_bh_force_quiescent_state(void);
-extern long rcu_batches_completed_bh(void);
-#else
-# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
-# define rcu_batches_completed_bh rcu_batches_completed
-#endif
-
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index d69cf63..49a4d6f 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa18682..de17134 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -14,14 +14,10 @@
#include <linux/linkage.h>
#include <linux/plist.h>
-#include <linux/spinlock_types_raw.h>
+#include <linux/spinlock_types.h>
extern int max_lock_depth; /* for sysctl */
-#ifdef CONFIG_DEBUG_MUTEXES
-#include <linux/debug_locks.h>
-#endif
-
/**
* The rt_mutex structure
*
@@ -33,10 +29,9 @@ struct rt_mutex {
raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
- int save_state;
#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *file;
- const char *name;
+ int save_state;
+ const char *name, *file;
int line;
void *magic;
#endif
@@ -61,39 +56,19 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
- do { \
- raw_spin_lock_init(&(mutex)->wait_lock); \
- __rt_mutex_init(mutex, #mutex); \
- } while (0)
-
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-
-# define rt_mutex_init(mutex) \
- do { \
- raw_spin_lock_init(&(mutex)->wait_lock); \
- __rt_mutex_init(mutex, #mutex); \
- } while (0)
-
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
-#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
, .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-
-
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
-
-#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
- { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- , .save_state = 1 }
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -115,7 +90,6 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
int detect_deadlock);
-extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
int detect_deadlock);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f28544b..321f4ec 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -4,6 +4,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/wait.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
deleted file mode 100644
index 49ed2d4..0000000
--- a/include/linux/rwlock_rt.h
+++ /dev/null
@@ -1,99 +0,0 @@
-#ifndef __LINUX_RWLOCK_RT_H
-#define __LINUX_RWLOCK_RT_H
-
-#ifndef __LINUX_SPINLOCK_H
-#error Do not include directly. Use spinlock.h
-#endif
-
-#define rwlock_init(rwl) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(rwl)->lock); \
- __rt_rwlock_init(rwl, #rwl, &__key); \
-} while (0)
-
-extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
-extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
-extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
-extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
-extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
-extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
-extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
-extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
-extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
-extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
-
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
-
-#define write_trylock_irqsave(lock, flags) \
- __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
-
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = rt_read_lock_irqsave(lock); \
- } while (0)
-
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = rt_write_lock_irqsave(lock); \
- } while (0)
-
-#define read_lock(lock) rt_read_lock(lock)
-
-#define read_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- rt_read_lock(lock); \
- } while (0)
-
-#define read_lock_irq(lock) read_lock(lock)
-
-#define write_lock(lock) rt_write_lock(lock)
-
-#define write_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- rt_write_lock(lock); \
- } while (0)
-
-#define write_lock_irq(lock) write_lock(lock)
-
-#define read_unlock(lock) rt_read_unlock(lock)
-
-#define read_unlock_bh(lock) \
- do { \
- rt_read_unlock(lock); \
- local_bh_enable(); \
- } while (0)
-
-#define read_unlock_irq(lock) read_unlock(lock)
-
-#define write_unlock(lock) rt_write_unlock(lock)
-
-#define write_unlock_bh(lock) \
- do { \
- rt_write_unlock(lock); \
- local_bh_enable(); \
- } while (0)
-
-#define write_unlock_irq(lock) write_unlock(lock)
-
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- rt_read_unlock(lock); \
- } while (0)
-
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- rt_write_unlock(lock); \
- } while (0)
-
-#endif
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index d0da966..cc0072e 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,10 +1,6 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
-#if !defined(__LINUX_SPINLOCK_TYPES_H)
-# error "Do not include directly, include spinlock_types.h"
-#endif
-
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
@@ -47,7 +43,6 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
-#define DEFINE_RWLOCK(name) \
- rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
deleted file mode 100644
index b138321..0000000
--- a/include/linux/rwlock_types_rt.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __LINUX_RWLOCK_TYPES_RT_H
-#define __LINUX_RWLOCK_TYPES_RT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-/*
- * rwlocks - rtmutex which allows single reader recursion
- */
-typedef struct {
- struct rt_mutex lock;
- int read_depth;
- unsigned int break_lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RW_LOCK_UNLOCKED(name) \
- { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
- RW_DEP_MAP_INIT(name) }
-
-#define DEFINE_RWLOCK(name) \
- rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
-
-#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 0ad6070..0616ffe 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -16,10 +16,6 @@
#include <linux/atomic.h>
-#ifdef CONFIG_PREEMPT_RT_FULL
-#include <linux/rwsem_rt.h>
-#else /* PREEMPT_RT_FULL */
-
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -153,6 +149,4 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
# define up_read_non_owner(sem) up_read(sem)
#endif
-#endif /* !PREEMPT_RT_FULL */
-
#endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
deleted file mode 100644
index e94d945..0000000
--- a/include/linux/rwsem_rt.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef _LINUX_RWSEM_RT_H
-#define _LINUX_RWSEM_RT_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Include rwsem.h"
-#endif
-
-/*
- * RW-semaphores are a spinlock plus a reader-depth count.
- *
- * Note that the semantics are different from the usual
- * Linux rw-sems, in PREEMPT_RT mode we do not allow
- * multiple readers to hold the lock at once, we only allow
- * a read-lock owner to read-lock recursively. This is
- * better for latency, makes the implementation inherently
- * fair and makes it simpler as well.
- */
-
-#include <linux/rtmutex.h>
-
-struct rw_semaphore {
- struct rt_mutex lock;
- int read_depth;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define __RWSEM_INITIALIZER(name) \
- { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
- RW_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(lockname) \
- struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
-
-extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
- struct lock_class_key *key);
-
-#define __rt_init_rwsem(sem, name, key) \
- do { \
- rt_mutex_init(&(sem)->lock); \
- __rt_rwsem_init((sem), (name), (key));\
- } while (0)
-
-#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
-
-# define rt_init_rwsem(sem) \
-do { \
- static struct lock_class_key __key; \
- \
- __rt_init_rwsem((sem), #sem, &__key); \
-} while (0)
-
-extern void rt_down_write(struct rw_semaphore *rwsem);
-extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
-extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
-extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
- struct lockdep_map *nest);
-extern void rt_down_read(struct rw_semaphore *rwsem);
-extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
-extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
-extern void rt_up_read(struct rw_semaphore *rwsem);
-extern void rt_up_write(struct rw_semaphore *rwsem);
-extern void rt_downgrade_write(struct rw_semaphore *rwsem);
-
-#define init_rwsem(sem) rt_init_rwsem(sem)
-#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
-
-static inline void down_read(struct rw_semaphore *sem)
-{
- rt_down_read(sem);
-}
-
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
- return rt_down_read_trylock(sem);
-}
-
-static inline void down_write(struct rw_semaphore *sem)
-{
- rt_down_write(sem);
-}
-
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
- return rt_down_write_trylock(sem);
-}
-
-static inline void up_read(struct rw_semaphore *sem)
-{
- rt_up_read(sem);
-}
-
-static inline void up_write(struct rw_semaphore *sem)
-{
- rt_up_write(sem);
-}
-
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
- rt_downgrade_write(sem);
-}
-
-static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
-{
- return rt_down_read_nested(sem, subclass);
-}
-
-static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
-{
- rt_down_write_nested(sem, subclass);
-}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void down_write_nest_lock(struct rw_semaphore *sem,
- struct rw_semaphore *nest_lock)
-{
- rt_down_write_nested_lock(sem, &nest_lock->dep_map);
-}
-
-#else
-
-static inline void down_write_nest_lock(struct rw_semaphore *sem,
- struct rw_semaphore *nest_lock)
-{
- rt_down_write_nested_lock(sem, NULL);
-}
-#endif
-#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 625a41f..a4d7d19 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -23,7 +23,6 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
-#include <asm/kmap_types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
@@ -53,7 +52,6 @@ struct sched_param {
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
-#include <linux/hardirq.h>
#include <asm/processor.h>
@@ -65,6 +63,10 @@ struct fs_struct;
struct perf_event_context;
struct blk_plug;
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
/*
* List of flags we want to share for kernel threads,
* if only because they are not used by them anyway.
@@ -167,8 +169,11 @@ extern char ___assert_task_state[1 - 2*!!(
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED)
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task) ((task)->exit_state != 0)
+#define task_is_stopped_or_traced(task) \
+ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
@@ -469,6 +474,7 @@ struct signal_struct {
atomic_t sigcnt;
atomic_t live;
int nr_threads;
+ struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -1021,7 +1027,6 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- volatile long saved_state; /* saved state for "spinlock sleepers" */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
@@ -1064,12 +1069,6 @@ struct task_struct {
#endif
unsigned int policy;
-#ifdef CONFIG_PREEMPT_RT_FULL
- int migrate_disable;
-# ifdef CONFIG_SCHED_DEBUG
- int migrate_disable_atomic;
-# endif
-#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -1098,6 +1097,9 @@ struct task_struct {
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+ /* per-thread vma caching */
+ u32 vmacache_seqnum;
+ struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
#endif
@@ -1154,6 +1156,7 @@ struct task_struct {
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
+ struct list_head thread_node;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
@@ -1165,8 +1168,7 @@ struct task_struct {
struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- raw_spinlock_t vtime_lock;
- seqcount_t vtime_seq;
+ seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
enum {
VTIME_SLEEPING = 0,
@@ -1182,9 +1184,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct task_struct *posix_timer_list;
-#endif
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
@@ -1216,15 +1215,10 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* TODO: move me into ->restart_block ? */
- struct siginfo forced_info;
-#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
@@ -1261,9 +1255,6 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
- int pagefault_disabled;
-#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
@@ -1339,9 +1330,6 @@ struct task_struct {
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
-#ifdef CONFIG_DEBUG_PREEMPT
- unsigned long preempt_disable_ip;
-#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
@@ -1409,12 +1397,6 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- u64 preempt_timestamp_hist;
-#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- long timer_offset;
-#endif
-#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
struct memcg_batch_info {
@@ -1438,19 +1420,11 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
-#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head put_rcu;
- int softirq_nestcnt;
- unsigned int softirqs_raised;
-#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
- int kmap_idx;
- pte_t kmap_pte[KM_TYPE_NR];
-# endif
-#endif
};
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
@@ -1463,17 +1437,6 @@ static inline void set_numabalancing_state(bool enabled)
}
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
-#else
-static inline bool cur_pf_disabled(void) { return false; }
-#endif
-
-static inline bool pagefault_disabled(void)
-{
- return in_atomic() || cur_pf_disabled();
-}
-
static inline struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
@@ -1547,6 +1510,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
}
+static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
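task_ppid_nr_ns() is the namespace-aware way to report a parent pid. A minimal sketch of a procfs-style caller (the seq_file target and label are illustrative); the RCU locking is handled inside the helper:

    pid_t ppid = task_ppid_nr_ns(task, ns);

    seq_printf(m, "PPid:\t%d\n", ppid);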
@@ -1586,7 +1567,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
*
* Return: 1 if the process is alive. 0 otherwise.
*/
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
{
return p->pids[PIDTYPE_PID].pid != NULL;
}
@@ -1609,15 +1590,6 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __put_task_struct_cb(struct rcu_head *rhp);
-
-static inline void put_task_struct(struct task_struct *t)
-{
- if (atomic_dec_and_test(&t->usage))
- call_rcu(&t->put_rcu, __put_task_struct_cb);
-}
-#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1625,7 +1597,6 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
-#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
@@ -1664,7 +1635,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
-#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -1721,11 +1691,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
+ * __GFP_FS is also cleared as it implies __GFP_IO.
+ */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~__GFP_IO;
+ flags &= ~(__GFP_IO | __GFP_FS);
return flags;
}
@@ -1810,10 +1782,6 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
-int migrate_me(void);
-void tell_sched_cpu_down_begin(int cpu);
-void tell_sched_cpu_down_done(int cpu);
-
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -1826,9 +1794,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
-static inline int migrate_me(void) { return 0; }
-static inline void tell_sched_cpu_down_begin(int cpu) { }
-static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
@@ -2036,7 +2001,6 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_lock_sleeper(struct task_struct * tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -2151,24 +2115,12 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
-
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __mmdrop_delayed(struct rcu_head *rhp);
-static inline void mmdrop_delayed(struct mm_struct *mm)
-{
- if (atomic_dec_and_test(&mm->mm_count))
- call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-}
-#else
-# define mmdrop_delayed(mm) mmdrop(mm)
-#endif
-
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
@@ -2240,6 +2192,16 @@ extern bool current_is_single_threaded(void);
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define __for_each_thread(signal, t) \
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+
+#define for_each_thread(p, t) \
+ __for_each_thread((p)->signal, t)
+
+/* Careful: this is a double loop, 'break' won't work as expected. */
+#define for_each_process_thread(p, t) \
+ for_each_process(p) for_each_thread(p, t)
+
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
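The new iterators walk the signal->thread_head list introduced earlier in this patch. A hedged sketch of RCU-protected use (do_something() is illustrative):

    struct task_struct *p, *t;

    rcu_read_lock();
    for_each_process_thread(p, t)
            do_something(t);
    rcu_read_unlock();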
@@ -2451,52 +2413,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-#ifdef CONFIG_PREEMPT_LAZY
-static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-{
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-}
-
-static inline int need_resched_lazy(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED) ||
- test_thread_flag(TIF_NEED_RESCHED_LAZY);
-}
-#else
-static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-static inline int need_resched_lazy(void) { return 0; }
-
-static inline int need_resched_now(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline int need_resched(void)
-{
- return test_thread_flag(TIF_NEED_RESCHED);
-}
-#endif
-
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
@@ -2528,49 +2444,9 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
-static inline bool __task_is_stopped_or_traced(struct task_struct *task)
-{
- if (task->state & (__TASK_STOPPED | __TASK_TRACED))
- return true;
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
- return true;
-#endif
- return false;
-}
-
-static inline bool task_is_stopped_or_traced(struct task_struct *task)
-{
- bool traced_stopped;
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- unsigned long flags;
-
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- traced_stopped = __task_is_stopped_or_traced(task);
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-#else
- traced_stopped = __task_is_stopped_or_traced(task);
-#endif
- return traced_stopped;
-}
-
-static inline bool task_is_traced(struct task_struct *task)
+static inline int need_resched(void)
{
- bool traced = false;
-
- if (task->state & __TASK_TRACED)
- return true;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* in case the task is sleeping on tasklist_lock */
- raw_spin_lock_irq(&task->pi_lock);
- if (task->state & __TASK_TRACED)
- traced = true;
- else if (task->saved_state & __TASK_TRACED)
- traced = true;
- raw_spin_unlock_irq(&task->pi_lock);
-#endif
- return traced;
+ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}
/*
@@ -2589,7 +2465,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
@@ -2600,16 +2476,12 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
-#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
-#else
-# define cond_resched_softirq() cond_resched()
-#endif
static inline void cond_resched_rcu(void)
{
@@ -2795,26 +2667,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
-static inline int __migrate_disabled(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- return p->migrate_disable;
-#else
- return 0;
-#endif
-}
-
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (p->migrate_disable)
- return cpumask_of(task_cpu(p));
-#endif
-
- return &p->cpus_allowed;
-}
-
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 4d54d6c..440434d 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -35,7 +35,6 @@ static inline int rt_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
@@ -46,10 +45,6 @@ static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
-{
- return 0;
-}
# define rt_mutex_adjust_pi(p) do { } while (0)
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 019a936..21a2093 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -152,30 +152,18 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void __write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
smp_wmb();
}
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- preempt_disable_rt();
- __write_seqcount_begin(s);
-}
-
-static inline void __write_seqcount_end(seqcount_t *s)
+static inline void write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
}
-static inline void write_seqcount_end(seqcount_t *s)
-{
- __write_seqcount_end(s);
- preempt_enable_rt();
-}
-
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
@@ -216,32 +204,10 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
-#ifndef CONFIG_PREEMPT_RT_FULL
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
return read_seqcount_begin(&sl->seqcount);
}
-#else
-/*
- * Starvation safe read side for RT
- */
-static inline unsigned read_seqbegin(seqlock_t *sl)
-{
- unsigned ret;
-
-repeat:
- ret = ACCESS_ONCE(sl->seqcount.sequence);
- if (unlikely(ret & 1)) {
- /*
- * Take the lock and let the writer proceed (i.e. possibly
- * boost it), otherwise we could loop here forever.
- */
- spin_unlock_wait(&sl->lock);
- goto repeat;
- }
- return ret;
-}
-#endif
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
@@ -256,36 +222,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
}
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -294,7 +260,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_begin(&sl->seqcount);
return flags;
}
@@ -304,7 +270,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- __write_seqcount_end(&sl->seqcount);
+ write_seqcount_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
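With the RT variants dropped, writers again pair the internal write_seqcount_begin()/end() with the spinlock via write_seqlock()/write_sequnlock(), and readers spin in the classic retry loop. A minimal sketch (the lock and the two fields are illustrative):

    static DEFINE_SEQLOCK(state_lock);      /* illustrative lock */
    static u64 state_a, state_b;            /* illustrative data */

    static void state_update(u64 a, u64 b)
    {
        write_seqlock(&state_lock);         /* lock + write_seqcount_begin() */
        state_a = a;
        state_b = b;
        write_sequnlock(&state_lock);       /* write_seqcount_end() + unlock */
    }

    static u64 state_read(void)
    {
        unsigned int seq;
        u64 sum;

        do {
            seq = read_seqbegin(&state_lock);
            sum = state_a + state_b;
        } while (read_seqretry(&state_lock, seq));

        return sum;
    }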
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 36aac73..9f779c7 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -23,6 +23,7 @@ struct serio {
char name[32];
char phys[32];
+ char firmware_id[128];
bool manual_bind;
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc..deb4960 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -49,6 +49,7 @@ extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern bool shmem_mapping(struct address_space *mapping);
extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 1414eb2..2ac423b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -226,7 +226,6 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
-extern void flush_task_sigqueue(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3bb9cf3..2960dab 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -133,7 +133,6 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
- raw_spinlock_t raw_lock;
};
struct sk_buff;
@@ -1074,12 +1073,6 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
-static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
-{
- raw_spin_lock_init(&list->raw_lock);
- __skb_queue_head_init(list);
-}
-
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
@@ -2401,6 +2394,7 @@ extern struct sk_buff *skb_segment(struct sk_buff *skb,
netdev_features_t features);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index e05b694..731f523 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,9 +188,6 @@ static inline void __smp_call_function_single(int cpuid,
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
-#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
-#define put_cpu_light() migrate_enable()
-
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 54f91d3..46cca4c 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
void sock_diag_save_cookie(void *sk, __u32 *cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
struct sk_buff *skb, int attrtype);
#endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a124f92..75f3494 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -262,11 +262,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/rwlock_rt.h>
-#else
-# include <linux/rwlock.h>
-#endif
+#include <linux/rwlock.h>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -277,10 +273,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/spinlock_rt.h>
-#else /* PREEMPT_RT_FULL */
-
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -410,6 +402,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-#endif /* !PREEMPT_RT_FULL */
-
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 1356078..bdb9993 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -191,8 +191,6 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-# include <linux/rwlock_api_smp.h>
-#endif
+#include <linux/rwlock_api_smp.h>
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
deleted file mode 100644
index ac6f08b..0000000
--- a/include/linux/spinlock_rt.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __LINUX_SPINLOCK_RT_H
-#define __LINUX_SPINLOCK_RT_H
-
-#ifndef __LINUX_SPINLOCK_H
-#error Do not include directly. Use spinlock.h
-#endif
-
-#include <linux/bug.h>
-
-extern void
-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
-
-#define spin_lock_init(slock) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key); \
-} while (0)
-
-extern void __lockfunc rt_spin_lock(spinlock_t *lock);
-extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
-extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
-extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
-extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
-extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
-extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
-extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
-extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
-extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
-
-/*
- * lockdep-less calls, for derived types like rwlock:
- * (for trylock they can use rt_mutex_trylock() directly.
- */
-extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
-extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
-
-#define spin_lock(lock) \
- do { \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-#define spin_lock_bh(lock) \
- do { \
- local_bh_disable(); \
- migrate_disable(); \
- rt_spin_lock(lock); \
- } while (0)
-
-#define spin_lock_irq(lock) spin_lock(lock)
-
-#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
-
-#define spin_trylock(lock) \
-({ \
- int __locked; \
- migrate_disable(); \
- __locked = spin_do_trylock(lock); \
- if (!__locked) \
- migrate_enable(); \
- __locked; \
-})
-
-#ifdef CONFIG_LOCKDEP
-# define spin_lock_nested(lock, subclass) \
- do { \
- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-
-# define spin_lock_irqsave_nested(lock, flags, subclass) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- migrate_disable(); \
- rt_spin_lock_nested(lock, subclass); \
- } while (0)
-#else
-# define spin_lock_nested(lock, subclass) spin_lock(lock)
-
-# define spin_lock_irqsave_nested(lock, flags, subclass) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- spin_lock(lock); \
- } while (0)
-#endif
-
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = 0; \
- spin_lock(lock); \
- } while (0)
-
-static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
-{
- unsigned long flags = 0;
-#ifdef CONFIG_TRACE_IRQFLAGS
- flags = rt_spin_lock_trace_flags(lock);
-#else
- spin_lock(lock); /* lock_local */
-#endif
- return flags;
-}
-
-/* FIXME: we need rt_spin_lock_nest_lock */
-#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
-
-#define spin_unlock(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- } while (0)
-
-#define spin_unlock_bh(lock) \
- do { \
- rt_spin_unlock(lock); \
- migrate_enable(); \
- local_bh_enable(); \
- } while (0)
-
-#define spin_unlock_irq(lock) spin_unlock(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- (void) flags; \
- spin_unlock(lock); \
- } while (0)
-
-#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) spin_trylock(lock)
-
-#define spin_trylock_irqsave(lock, flags) \
- rt_spin_trylock_irqsave(lock, &(flags))
-
-#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
-
-#ifdef CONFIG_GENERIC_LOCKBREAK
-# define spin_is_contended(lock) ((lock)->break_lock)
-#else
-# define spin_is_contended(lock) (((void)(lock), 0))
-#endif
-
-static inline int spin_can_lock(spinlock_t *lock)
-{
- return !rt_mutex_is_locked(&lock->lock);
-}
-
-static inline int spin_is_locked(spinlock_t *lock)
-{
- return rt_mutex_is_locked(&lock->lock);
-}
-
-static inline void assert_spin_locked(spinlock_t *lock)
-{
- BUG_ON(!spin_is_locked(lock));
-}
-
-#define atomic_dec_and_lock(atomic, lock) \
- atomic_dec_and_spin_lock(atomic, lock)
-
-#endif
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 10bac71..73548eb 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,15 +9,80 @@
* Released under the General Public License (GPL).
*/
-#include <linux/spinlock_types_raw.h>
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
-#ifndef CONFIG_PREEMPT_RT_FULL
-# include <linux/spinlock_types_nort.h>
-# include <linux/rwlock_types.h>
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# include <linux/rtmutex.h>
-# include <linux/spinlock_types_rt.h>
-# include <linux/rwlock_types_rt.h>
+# define SPIN_DEBUG_INIT(lockname)
#endif
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
+#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
+
#endif /* __LINUX_SPINLOCK_TYPES_H */
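Folding the type definitions back into spinlock_types.h restores the usual static initializers; a minimal sketch of both lock flavors (the lock names and counter are illustrative):

    static DEFINE_SPINLOCK(counter_lock);   /* spinlock_t: wraps a raw_spinlock */
    static DEFINE_RAW_SPINLOCK(hw_lock);    /* raw_spinlock_t: for low-level paths */
    static unsigned long counter;

    static void counter_inc(void)
    {
        spin_lock(&counter_lock);
        counter++;
        spin_unlock(&counter_lock);
    }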
diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
deleted file mode 100644
index f1dac1f..0000000
--- a/include/linux/spinlock_types_nort.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
-#define __LINUX_SPINLOCK_TYPES_NORT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-/*
- * The non RT version maps spinlocks to raw_spinlocks
- */
-typedef struct spinlock {
- union {
- struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- struct {
- u8 __padding[LOCK_PADSIZE];
- struct lockdep_map dep_map;
- };
-#endif
- };
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#endif
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
deleted file mode 100644
index edffc4d..0000000
--- a/include/linux/spinlock_types_raw.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-#define __LINUX_SPINLOCK_TYPES_RAW_H
-
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-#endif
diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
deleted file mode 100644
index 9fd4319..0000000
--- a/include/linux/spinlock_types_rt.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __LINUX_SPINLOCK_TYPES_RT_H
-#define __LINUX_SPINLOCK_TYPES_RT_H
-
-#ifndef __LINUX_SPINLOCK_TYPES_H
-#error "Do not include directly. Include spinlock_types.h instead"
-#endif
-
-#include <linux/cache.h>
-
-/*
- * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
- */
-typedef struct spinlock {
- struct rt_mutex lock;
- unsigned int break_lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __RT_SPIN_INITIALIZER(name) \
- { \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- .save_state = 1, \
- .file = __FILE__, \
- .line = __LINE__ , \
- }
-#else
-# define __RT_SPIN_INITIALIZER(name) \
- { \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- .save_state = 1, \
- }
-#endif
-
-/*
-.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
-*/
-
-#define __SPIN_LOCK_UNLOCKED(name) \
- { .lock = __RT_SPIN_INITIALIZER(name.lock), \
- SPIN_DEP_MAP_INIT(name) }
-
-#define __DEFINE_SPINLOCK(name) \
- spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
-
-#define DEFINE_SPINLOCK(name) \
- spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
-
-#endif
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index d5e50dd..c114614 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
void process_srcu(struct work_struct *work);
-#define __SRCU_STRUCT_INIT(name, pcpu_name) \
+#define __SRCU_STRUCT_INIT(name) \
{ \
.completed = -300, \
- .per_cpu_ref = &pcpu_name, \
+ .per_cpu_ref = &name##_srcu_array, \
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
@@ -104,12 +104,11 @@ void process_srcu(struct work_struct *work);
*/
#define DEFINE_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name);
#define DEFINE_STATIC_SRCU(name) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- static struct srcu_struct name = __SRCU_STRUCT_INIT(\
- name, name##_srcu_array);
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
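Since __SRCU_STRUCT_INIT() now pastes name##_srcu_array itself, the caller no longer passes the per-CPU array name. DEFINE_SRCU(foo) therefore expands roughly as below (an approximation of the expansion, not literal preprocessor output):

    static DEFINE_PER_CPU(struct srcu_struct_array, foo_srcu_array);
    struct srcu_struct foo = {
        .completed   = -300,
        .per_cpu_ref = &foo_srcu_array,   /* derived from "foo" inside the macro */
        /* ... queue_lock, batch and work initializers as in __SRCU_STRUCT_INIT() ... */
    };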
diff --git a/include/linux/string.h b/include/linux/string.h
index ac889c5..0ed878d 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
- const void *from, size_t available);
+ const void *from, size_t available);
/**
* strstarts - does @str start with @prefix?
@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
return strncmp(str, prefix, strlen(prefix)) == 0;
}
-extern size_t memweight(const void *ptr, size_t bytes);
+size_t memweight(const void *ptr, size_t bytes);
+void memzero_explicit(void *s, size_t count);
/**
* kbasename - return the last part of a pathname.
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index b05963f..f5bfb1a 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -32,6 +32,7 @@ struct svc_xprt_class {
struct svc_xprt_ops *xcl_ops;
struct list_head xcl_list;
u32 xcl_max_payload;
+ int xcl_ident;
};
/*
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 62fd1b7..947009e 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -56,6 +56,7 @@ int svc_recv(struct svc_rqst *, long);
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
+bool svc_alien_sock(struct net *net, int fd);
int svc_addsock(struct svc_serv *serv, const int fd,
char *name_return, const size_t len);
void svc_init_xprt_sock(void);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 46ba0c6..241bf09 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -214,8 +214,9 @@ struct percpu_cluster {
struct swap_info_struct {
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
+ struct plist_node list; /* entry in swap_active_head */
+ struct plist_node avail_list; /* entry in swap_avail_head */
signed char type; /* strange name for an index */
- signed char next; /* next type on the swap list */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
@@ -255,11 +256,6 @@ struct swap_info_struct {
struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};
-struct swap_list_t {
- int head; /* head of priority-ordered swapfile list */
- int next; /* swapfile to be used next */
-};
-
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
@@ -272,12 +268,14 @@ extern unsigned long nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
-extern void __lru_cache_add(struct page *);
extern void lru_cache_add(struct page *);
+extern void lru_cache_add_anon(struct page *page);
+extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
+extern void init_page_accessed(struct page *page);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
@@ -287,22 +285,6 @@ extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
-/**
- * lru_cache_add: add a page to the page lists
- * @page: the page to add
- */
-static inline void lru_cache_add_anon(struct page *page)
-{
- ClearPageActive(page);
- __lru_cache_add(page);
-}
-
-static inline void lru_cache_add_file(struct page *page)
-{
- ClearPageActive(page);
- __lru_cache_add(page);
-}
-
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
@@ -460,7 +442,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
#define free_page_and_swap_cache(page) \
page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
- release_pages((pages), (nr), 0);
+ release_pages((pages), (nr), false);
static inline void show_swap_cache_info(void)
{
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index e282624..388293a 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -6,7 +6,7 @@
* want to expose them to the dozens of source files that include swap.h
*/
extern spinlock_t swap_lock;
-extern struct swap_list_t swap_list;
+extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
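The plist conversion replaces the old head/next index chain: each active swap device sits on swap_active_head via its plist_node, which plist keeps sorted by ascending node priority (the swap code inserts entries with negated priority so higher-priority devices come first). A sketch of a priority-ordered walk (the helper is illustrative, not the in-tree allocator):

    /* Return the first writable swap device in priority order.
     * Assumes the caller holds swap_lock. */
    static struct swap_info_struct *first_usable_swap(void)
    {
        struct swap_info_struct *si;

        plist_for_each_entry(si, &swap_active_head, list) {
            if (si->flags & SWP_WRITEOK)
                return si;
        }
        return NULL;
    }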
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b15655f..14a8ff2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,7 +25,6 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
diff --git a/include/linux/time.h b/include/linux/time.h
index d5d229b..7d532a3 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts);
extern void monotonic_to_bootbased(struct timespec *ts);
extern void get_monotonic_boottime(struct timespec *ts);
+static inline bool timeval_valid(const struct timeval *tv)
+{
+ /* Dates before 1970 are bogus */
+ if (tv->tv_sec < 0)
+ return false;
+
+ /* Can't have more microseconds than a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+
+ return true;
+}
+
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
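A caller-side sketch of the new helper, e.g. rejecting a user-supplied timeval before arming a timer (the surrounding function is illustrative):

    static int set_user_interval(const struct timeval *tv)
    {
        /* rejects negative seconds and usecs outside [0, USEC_PER_SEC) */
        if (!timeval_valid(tv))
            return -EINVAL;

        /* ... convert and program the timer ... */
        return 0;
    }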
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 5fcd72c..8c5a197 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 44b3751..5ca0951 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,38 @@
/*
* These routines enable/disable the pagefault handler in that
- * it will not take any MM locks and go straight to the fixup table.
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
*/
-static inline void raw_pagefault_disable(void)
+static inline void pagefault_disable(void)
{
inc_preempt_count();
+ /*
+ * make sure to have issued the store before a pagefault
+ * can hit.
+ */
barrier();
}
-static inline void raw_pagefault_enable(void)
+static inline void pagefault_enable(void)
{
+ /*
+ * make sure to issue those last loads/stores before enabling
+ * the pagefault handler again.
+ */
barrier();
dec_preempt_count();
+ /*
+ * make sure we do..
+ */
barrier();
preempt_check_resched();
}
-#ifndef CONFIG_PREEMPT_RT_FULL
-static inline void pagefault_disable(void)
-{
- raw_pagefault_disable();
-}
-
-static inline void pagefault_enable(void)
-{
- raw_pagefault_enable();
-}
-#else
-extern void pagefault_disable(void);
-extern void pagefault_enable(void);
-#endif
-
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -76,9 +77,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
mm_segment_t old_fs = get_fs(); \
\
set_fs(KERNEL_DS); \
- raw_pagefault_disable(); \
+ pagefault_disable(); \
ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
- raw_pagefault_enable(); \
+ pagefault_enable(); \
set_fs(old_fs); \
ret; \
})
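The probe_kernel_read()-style macro above shows the canonical pairing: switch to KERNEL_DS, disable pagefaults so a bad address takes the fixup path instead of faulting in (or sleeping), copy, then restore. The same pattern written out as a function (a sketch; the helper name is illustrative):

    /* Read a long from a possibly-unmapped kernel address without
     * sleeping; returns 0 on success, -EFAULT otherwise. */
    static long probe_read_long(long *dst, const long *addr)
    {
        mm_segment_t old_fs = get_fs();
        long ret;

        set_fs(KERNEL_DS);
        pagefault_disable();
        ret = __copy_from_user_inatomic(dst,
                (__force const long __user *)addr, sizeof(*dst));
        pagefault_enable();
        set_fs(old_fs);

        return ret ? -EFAULT : 0;
    }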
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index d115f62..06f28be 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
-#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 52f944d..3fb4288 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -30,4 +30,24 @@
descriptor */
#define USB_QUIRK_DELAY_INIT 0x00000040
+/*
+ * For high speed and super speed interrupt endpoints, the USB 2.0 and
+ * USB 3.0 spec require the interval in microframes
+ * (1 microframe = 125 microseconds) to be calculated as
+ * interval = 2 ^ (bInterval-1).
+ *
+ * Devices with this quirk report their bInterval as the result of this
+ * calculation instead of the exponent variable used in the calculation.
+ */
+#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080
+
+/* device generates spurious wakeup, ignore remote wakeup capability */
+#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+
+/* device can't handle device_qualifier descriptor requests */
+#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
+
#endif /* __LINUX_USB_QUIRKS_H */
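Concretely: a conforming device wanting a 1 ms (8 microframe) interval reports bInterval = 4, since 2^(4-1) = 8; a device with this quirk reports bInterval = 8 directly. A hypothetical helper showing how a quirked linear value maps back to the exponent encoding (not the in-tree decoding logic):

    /* Largest exponent-encoded bInterval whose interval 2^(bInterval-1)
     * does not exceed the given number of microframes; rounding down
     * keeps the device polled at least as often as it asked for. */
    static unsigned int uframes_to_binterval(unsigned int uframes)
    {
        unsigned int exp = 1;               /* bInterval 1 == 1 microframe */

        while ((1u << exp) <= uframes)
            exp++;
        return exp;                         /* e.g. 8 microframes -> 4 */
    }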
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 4db2985..67c1108 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
} extent[UID_GID_MAP_MAX_EXTENTS];
};
+#define USERNS_SETGROUPS_ALLOWED 1UL
+
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -27,6 +31,7 @@ struct user_namespace {
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
+ unsigned long flags;
};
extern struct user_namespace init_user_ns;
@@ -57,6 +62,9 @@ extern struct seq_operations proc_projid_seq_operations;
extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
+extern int proc_setgroups_show(struct seq_file *m, void *v);
+extern bool userns_may_setgroups(const struct user_namespace *ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -81,6 +89,10 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ return true;
+}
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 502073a..b483abd 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
#endif
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index c557c6d..3a712e2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif
+#endif /* CONFIG_SMP */
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
+#endif /* CONFIG_DEBUG_TLBFLUSH */
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
new file mode 100644
index 0000000..c3fa0fd4
--- /dev/null
+++ b/include/linux/vmacache.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_VMACACHE_H
+#define __LINUX_VMACACHE_H
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+/*
+ * Hash based on the page number. Provides a good hit rate for
+ * workloads with good locality and those with random accesses as well.
+ */
+#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
+
+static inline void vmacache_flush(struct task_struct *tsk)
+{
+ memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+}
+
+extern void vmacache_flush_all(struct mm_struct *mm);
+extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
+extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
+ unsigned long addr);
+
+#ifndef CONFIG_MMU
+extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+#endif
+
+static inline void vmacache_invalidate(struct mm_struct *mm)
+{
+ mm->vmacache_seqnum++;
+
+ /* deal with overflows */
+ if (unlikely(mm->vmacache_seqnum == 0))
+ vmacache_flush_all(mm);
+}
+
+#endif /* __LINUX_VMACACHE_H */
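The cache is a small direct-mapped, per-task array indexed by VMACACHE_HASH(); bumping mm->vmacache_seqnum is the cheap global invalidation, with the full flush only needed on wraparound. A lookup sketch in the same spirit (illustrative; the real vmacache_find() also checks the sequence number and counts hit/miss statistics):

    static struct vm_area_struct *vmacache_probe(struct mm_struct *mm,
                                                 unsigned long addr)
    {
        struct vm_area_struct *vma = current->vmacache[VMACACHE_HASH(addr)];

        if (vma && vma->vm_mm == mm &&
            vma->vm_start <= addr && addr < vma->vm_end)
            return vma;                     /* hit */

        return NULL;                        /* miss: walk the rbtree, then
                                             * vmacache_update(addr, vma) */
    }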
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1ea2fd5..67ce70c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -29,9 +29,7 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- preempt_disable_rt();
__this_cpu_inc(vm_event_states.event[item]);
- preempt_enable_rt();
}
static inline void count_vm_event(enum vm_event_item item)
@@ -41,9 +39,7 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- preempt_disable_rt();
__this_cpu_add(vm_event_states.event[item], delta);
- preempt_enable_rt();
}
static inline void count_vm_events(enum vm_event_item item, long delta)
@@ -87,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#define count_vm_tlb_event(x) count_vm_event(x)
+#define count_vm_tlb_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_tlb_event(x) do {} while (0)
+#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
+#endif
+
#define __count_zone_vm_events(item, zone, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
zone_idx(zone), delta)
diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
deleted file mode 100644
index f86bca2..0000000
--- a/include/linux/wait-simple.h
+++ /dev/null
@@ -1,207 +0,0 @@
-#ifndef _LINUX_WAIT_SIMPLE_H
-#define _LINUX_WAIT_SIMPLE_H
-
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#include <asm/current.h>
-
-struct swaiter {
- struct task_struct *task;
- struct list_head node;
-};
-
-#define DEFINE_SWAITER(name) \
- struct swaiter name = { \
- .task = current, \
- .node = LIST_HEAD_INIT((name).node), \
- }
-
-struct swait_head {
- raw_spinlock_t lock;
- struct list_head list;
-};
-
-#define SWAIT_HEAD_INITIALIZER(name) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .list = LIST_HEAD_INIT((name).list), \
- }
-
-#define DEFINE_SWAIT_HEAD(name) \
- struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
-
-extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
-
-#define init_swait_head(swh) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_swait_head((swh), &__key); \
- } while (0)
-
-/*
- * Waiter functions
- */
-extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
-extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
-extern void swait_finish(struct swait_head *head, struct swaiter *w);
-
-/* Check whether a head has waiters enqueued */
-static inline bool swaitqueue_active(struct swait_head *h)
-{
- /* Make sure the condition is visible before checking list_empty() */
- smp_mb();
- return !list_empty(&h->list);
-}
-
-/*
- * Wakeup functions
- */
-extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
-extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
-
-#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
-#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
-#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
-#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
-
-/*
- * Event API
- */
-#define __swait_event(wq, condition) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event(wq, condition) \
-do { \
- if (condition) \
- break; \
- __swait_event(wq, condition); \
-} while (0)
-
-#define __swait_event_interruptible(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- schedule(); \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-#define __swait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define swait_event_interruptible(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __swait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
-
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- int __ret = timeout; \
- if (!(condition)) \
- __swait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#define __swait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_SWAITER(__wait); \
- \
- for (;;) { \
- swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- swait_finish(&wq, &__wait); \
-} while (0)
-
-/**
- * swait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
- */
-#define swait_event_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- if (!(condition)) \
- __swait_event_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 68f7245..a67fc16 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -7,7 +7,6 @@
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>
-#include <linux/atomic.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 594521b..eff358e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -455,7 +455,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
1, (name))
#define create_singlethread_workqueue(name) \
- alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name))
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
extern void destroy_workqueue(struct workqueue_struct *wq);
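The new mapping onto alloc_ordered_workqueue() keeps the one-at-a-time, in-order execution guarantee that single-threaded workqueues always implied, without dedicating a kernel thread per queue. Call sites are unchanged; a minimal sketch (queue, work item and handler are illustrative):

    static struct workqueue_struct *my_wq;   /* hypothetical queue */
    static struct work_struct my_work;       /* hypothetical work item */

    static void my_work_fn(struct work_struct *work)
    {
        /* items on an ordered queue never run concurrently */
    }

    static int my_init(void)
    {
        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
            return -ENOMEM;

        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);
        return 0;
    }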