Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c                          6
-rw-r--r--  kernel/cgroup.c                         2
-rw-r--r--  kernel/context_tracking.c              80
-rw-r--r--  kernel/events/core.c                   21
-rw-r--r--  kernel/irq/internals.h                  5
-rw-r--r--  kernel/irq/pm.c                         3
-rw-r--r--  kernel/irq/proc.c                       2
-rw-r--r--  kernel/kexec.c                          2
-rw-r--r--  kernel/kexec_core.c                     4
-rw-r--r--  kernel/kexec_file.c                     2
-rw-r--r--  kernel/livepatch/core.c                 6
-rw-r--r--  kernel/locking/lockdep.c                2
-rw-r--r--  kernel/memremap.c                      16
-rw-r--r--  kernel/panic.c                         13
-rw-r--r--  kernel/params.c                        20
-rw-r--r--  kernel/pid.c                            4
-rw-r--r--  kernel/power/snapshot.c                 2
-rw-r--r--  kernel/power/swap.c                    16
-rw-r--r--  kernel/printk/printk.c                 13
-rw-r--r--  kernel/sched/fair.c                     2
-rw-r--r--  kernel/signal.c                        55
-rw-r--r--  kernel/smp.c                            2
-rw-r--r--  kernel/sys.c                            4
-rw-r--r--  kernel/time/clocksource.c               2
-rw-r--r--  kernel/time/timekeeping.c               2
-rw-r--r--  kernel/time/timer.c                    22
-rw-r--r--  kernel/trace/Kconfig                    2
-rw-r--r--  kernel/trace/ring_buffer.c             17
-rw-r--r--  kernel/trace/trace.c                   16
-rw-r--r--  kernel/trace/trace.h                    1
-rw-r--r--  kernel/trace/trace_functions_graph.c    6
31 files changed, 176 insertions, 174 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 8a056a3..5ffcbd3 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1371,16 +1371,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
if (unlikely(audit_filter_type(type)))
return NULL;
- if (gfp_mask & __GFP_WAIT) {
+ if (gfp_mask & __GFP_DIRECT_RECLAIM) {
if (audit_pid && audit_pid == current->pid)
- gfp_mask &= ~__GFP_WAIT;
+ gfp_mask &= ~__GFP_DIRECT_RECLAIM;
else
reserve = 0;
}
while (audit_backlog_limit
&& skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
- if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+ if (gfp_mask & __GFP_DIRECT_RECLAIM && audit_backlog_wait_time) {
long sleep_time;
sleep_time = timeout_start + audit_backlog_wait_time - jiffies;
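Note on the flag rename above: with the __GFP_WAIT split, only __GFP_DIRECT_RECLAIM still means "the caller may block and reclaim directly", which is exactly what audit_log_start() needs to test. A minimal sketch of the gfp.h helpers this series relies on (abbreviated, not part of this diff):

    /* __GFP_RECLAIM keeps the old __GFP_WAIT behaviour: direct reclaim plus kswapd wakeup. */
    #define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

    /* Callers that only need a yes/no "may this allocation sleep?" answer use this predicate. */
    static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
    {
            return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
    }

The same predicate appears in the kernel/smp.c hunk further down.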
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b9d0cce..f1603c1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -299,7 +299,7 @@ static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
idr_preload(gfp_mask);
spin_lock_bh(&cgroup_idr_lock);
- ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
+ ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
spin_unlock_bh(&cgroup_idr_lock);
idr_preload_end();
return ret;
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 0a495ab..d8560ee 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -58,36 +58,13 @@ static void context_tracking_recursion_exit(void)
* instructions to execute won't use any RCU read side critical section
* because this function sets RCU in extended quiescent state.
*/
-void context_tracking_enter(enum ctx_state state)
+void __context_tracking_enter(enum ctx_state state)
{
- unsigned long flags;
-
- /*
- * Repeat the user_enter() check here because some archs may be calling
- * this from asm and if no CPU needs context tracking, they shouldn't
- * go further. Repeat the check here until they support the inline static
- * key check.
- */
- if (!context_tracking_is_enabled())
- return;
-
- /*
- * Some contexts may involve an exception occuring in an irq,
- * leading to that nesting:
- * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
- * This would mess up the dyntick_nesting count though. And rcu_irq_*()
- * helpers are enough to protect RCU uses inside the exception. So
- * just return immediately if we detect we are in an IRQ.
- */
- if (in_interrupt())
- return;
-
/* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm);
- local_irq_save(flags);
if (!context_tracking_recursion_enter())
- goto out_irq_restore;
+ return;
if ( __this_cpu_read(context_tracking.state) != state) {
if (__this_cpu_read(context_tracking.active)) {
@@ -120,7 +97,27 @@ void context_tracking_enter(enum ctx_state state)
__this_cpu_write(context_tracking.state, state);
}
context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_enter);
+EXPORT_SYMBOL_GPL(__context_tracking_enter);
+
+void context_tracking_enter(enum ctx_state state)
+{
+ unsigned long flags;
+
+ /*
+ * Some contexts may involve an exception occurring in an irq,
+ * leading to that nesting:
+ * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+ * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+ * helpers are enough to protect RCU uses inside the exception. So
+ * just return immediately if we detect we are in an IRQ.
+ */
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+ __context_tracking_enter(state);
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
@@ -128,7 +125,7 @@ EXPORT_SYMBOL_GPL(context_tracking_enter);
void context_tracking_user_enter(void)
{
- context_tracking_enter(CONTEXT_USER);
+ user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
@@ -144,19 +141,10 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void context_tracking_exit(enum ctx_state state)
+void __context_tracking_exit(enum ctx_state state)
{
- unsigned long flags;
-
- if (!context_tracking_is_enabled())
- return;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
if (!context_tracking_recursion_enter())
- goto out_irq_restore;
+ return;
if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) {
@@ -173,7 +161,19 @@ void context_tracking_exit(enum ctx_state state)
__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
}
context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_exit);
+EXPORT_SYMBOL_GPL(__context_tracking_exit);
+
+void context_tracking_exit(enum ctx_state state)
+{
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+ __context_tracking_exit(state);
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(context_tracking_exit);
void context_tracking_user_exit(void)
{
- context_tracking_exit(CONTEXT_USER);
+ user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
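The refactoring above keeps context_tracking_enter()/exit() as the irq-safe entry points and exposes __context_tracking_enter()/__context_tracking_exit() for callers that already run with interrupts disabled, letting them skip the local_irq_save()/restore() pair. A hedged sketch of such a caller (the wrapper name and its placement are illustrative assumptions, not part of this diff):

    /* Hypothetical wrapper: the caller guarantees irqs are off and !in_interrupt(). */
    static inline void enter_user_irqs_disabled(void)
    {
            if (context_tracking_is_enabled())      /* static-key check done in the caller */
                    __context_tracking_enter(CONTEXT_USER);
    }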
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1a734e0..36babfd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1050,13 +1050,13 @@ retry:
/*
* One of the few rules of preemptible RCU is that one cannot do
* rcu_read_unlock() while holding a scheduler (or nested) lock when
- * part of the read side critical section was preemptible -- see
+ * part of the read side critical section was irqs-enabled -- see
* rcu_read_unlock_special().
*
* Since ctx->lock nests under rq->lock we must ensure the entire read
- * side critical section is non-preemptible.
+ * side critical section has interrupts disabled.
*/
- preempt_disable();
+ local_irq_save(*flags);
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
@@ -1070,21 +1070,22 @@ retry:
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
- raw_spin_lock_irqsave(&ctx->lock, *flags);
+ raw_spin_lock(&ctx->lock);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
- raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock(&ctx->lock);
rcu_read_unlock();
- preempt_enable();
+ local_irq_restore(*flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
- raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock(&ctx->lock);
ctx = NULL;
}
}
rcu_read_unlock();
- preempt_enable();
+ if (!ctx)
+ local_irq_restore(*flags);
return ctx;
}
@@ -6913,6 +6914,10 @@ static int perf_tp_filter_match(struct perf_event *event,
{
void *record = data->raw->data;
+ /* only top level events have filters set */
+ if (event->parent)
+ event = event->parent;
+
if (likely(!event->filter) || filter_match_preds(event->filter, record))
return 1;
return 0;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 05c2188..fcab63c 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -199,6 +199,11 @@ static inline int irq_desc_get_node(struct irq_desc *desc)
return irq_common_data_get_node(&desc->irq_common_data);
}
+static inline int irq_desc_is_chained(struct irq_desc *desc)
+{
+ return (desc->action && desc->action == &chained_action);
+}
+
#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index e80c440..cea1de0 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -70,7 +70,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
static bool suspend_device_irq(struct irq_desc *desc)
{
- if (!desc->action || desc->no_suspend_depth)
+ if (!desc->action || irq_desc_is_chained(desc) ||
+ desc->no_suspend_depth)
return false;
if (irqd_is_wakeup_set(&desc->irq_data)) {
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a916cf1..a2c02fd 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,7 +475,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
- if ((!action || action == &chained_action) && !any_count)
+ if ((!action || irq_desc_is_chained(desc)) && !any_count)
goto out;
seq_printf(p, "%*d: ", prec, i);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 4c5edc3..d873b64 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -6,6 +6,8 @@
* Version 2. See the file COPYING for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index bd9f8a0..11b64a6 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -6,7 +6,7 @@
* Version 2. See the file COPYING for more details.
*/
-#define pr_fmt(fmt) "kexec: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/mm.h>
@@ -1027,7 +1027,7 @@ static int __init crash_notes_memory_init(void)
crash_notes = __alloc_percpu(size, align);
if (!crash_notes) {
- pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
+ pr_warn("Memory allocation for saving cpu register states failed\n");
return -ENOMEM;
}
return 0;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 6a9a3f2..b70ada0 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -9,6 +9,8 @@
* Version 2. See the file COPYING for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e53441..db545cb 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
for (reloc = obj->relocs; reloc->name; reloc++) {
if (!klp_is_module(obj)) {
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+ /* If KASLR has been enabled, adjust old value accordingly */
+ if (kaslr_enabled())
+ reloc->val += kaslr_offset();
+#endif
ret = klp_verify_vmlinux_symbol(reloc->name,
reloc->val);
if (ret)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4e49cc4..deae390 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2738,7 +2738,7 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
return;
/* no reclaim without waiting on it */
- if (!(gfp_mask & __GFP_WAIT))
+ if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
return;
/* this guy won't enter reclaim */
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 9d6b555..7658d32 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -124,9 +124,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
{
void **ptr, *addr;
- ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
+ ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
if (!ptr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
addr = memremap(offset, size, flags);
if (addr) {
@@ -141,9 +142,8 @@ EXPORT_SYMBOL(devm_memremap);
void devm_memunmap(struct device *dev, void *addr)
{
- WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
- addr));
- memunmap(addr);
+ WARN_ON(devres_release(dev, devm_memremap_release,
+ devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
@@ -176,8 +176,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
if (is_ram == REGION_INTERSECTS)
return __va(res->start);
- page_map = devres_alloc(devm_memremap_pages_release,
- sizeof(*page_map), GFP_KERNEL);
+ page_map = devres_alloc_node(devm_memremap_pages_release,
+ sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
if (!page_map)
return ERR_PTR(-ENOMEM);
@@ -185,7 +185,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
nid = dev_to_node(dev);
if (nid < 0)
- nid = 0;
+ nid = numa_mem_id();
error = arch_add_memory(nid, res->start, resource_size(res), true);
if (error) {
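Callers of devm_memremap() must adjust for the error-path change above: an allocation failure now yields ERR_PTR(-ENOMEM) rather than NULL. A usage sketch under the usual driver-probe assumptions (dev and res being the probe's struct device * and struct resource *):

    void *base = devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
    if (IS_ERR(base))               /* was: if (!base) */
            return PTR_ERR(base);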
diff --git a/kernel/panic.c b/kernel/panic.c
index 04e91ff..4b150bc 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -23,6 +23,7 @@
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
+#include <linux/console.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
@@ -147,6 +148,18 @@ void panic(const char *fmt, ...)
bust_spinlocks(0);
+ /*
+ * We may have ended up stopping the CPU holding the lock (in
+ * smp_send_stop()) while still having some valuable data in the console
+ * buffer. Try to acquire the lock then release it regardless of the
+ * result. The release will also print the buffers out. Locks debug
+ * should be disabled to avoid reporting bad unlock balance when
+ * panic() is not being called from OOPS.
+ */
+ debug_locks_off();
+ console_trylock();
+ console_unlock();
+
if (!panic_blink)
panic_blink = no_blink;
diff --git a/kernel/params.c b/kernel/params.c
index b6554aa..a6d6149 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -223,7 +223,7 @@ char *parse_args(const char *doing,
int (*unknown)(char *param, char *val,
const char *doing, void *arg))
{
- char *param, *val;
+ char *param, *val, *err = NULL;
/* Chew leading spaces */
args = skip_spaces(args);
@@ -238,7 +238,7 @@ char *parse_args(const char *doing,
args = next_arg(args, &param, &val);
/* Stop at -- */
if (!val && strcmp(param, "--") == 0)
- return args;
+ return err ?: args;
irq_was_disabled = irqs_disabled();
ret = parse_one(param, val, doing, params, num,
min_level, max_level, arg, unknown);
@@ -247,24 +247,25 @@ char *parse_args(const char *doing,
doing, param);
switch (ret) {
+ case 0:
+ continue;
case -ENOENT:
pr_err("%s: Unknown parameter `%s'\n", doing, param);
- return ERR_PTR(ret);
+ break;
case -ENOSPC:
pr_err("%s: `%s' too large for parameter `%s'\n",
doing, val ?: "", param);
- return ERR_PTR(ret);
- case 0:
break;
default:
pr_err("%s: `%s' invalid for parameter `%s'\n",
doing, val ?: "", param);
- return ERR_PTR(ret);
+ break;
}
+
+ err = ERR_PTR(ret);
}
- /* All parsed OK. */
- return NULL;
+ return err;
}
/* Lazy bastard, eh? */
@@ -325,10 +326,11 @@ int param_get_charp(char *buffer, const struct kernel_param *kp)
}
EXPORT_SYMBOL(param_get_charp);
-static void param_free_charp(void *arg)
+void param_free_charp(void *arg)
{
maybe_kfree_parameter(*((char **)arg));
}
+EXPORT_SYMBOL(param_free_charp);
const struct kernel_param_ops param_ops_charp = {
.set = param_set_charp,
diff --git a/kernel/pid.c b/kernel/pid.c
index ca36879..78b3d9f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
rcu_read_lock();
if (type != PIDTYPE_PID)
task = task->group_leader;
- pid = get_pid(task->pids[type].pid);
+ pid = get_pid(rcu_dereference(task->pids[type].pid));
rcu_read_unlock();
return pid;
}
@@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
if (likely(pid_alive(task))) {
if (type != PIDTYPE_PID)
task = task->group_leader;
- nr = pid_nr_ns(task->pids[type].pid, ns);
+ nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
}
rcu_read_unlock();
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5235dd4..3a97060 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1779,7 +1779,7 @@ alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
while (to_alloc-- > 0) {
struct page *page;
- page = alloc_image_page(__GFP_HIGHMEM);
+ page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
memory_bm_set_bit(bm, page_to_pfn(page));
}
return nr_highmem;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b2066fb..12cd989 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -257,7 +257,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
struct bio *bio;
int error = 0;
- bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+ bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio->bi_bdev = hib_resume_bdev;
@@ -356,7 +356,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
return -ENOSPC;
if (hb) {
- src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+ src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
__GFP_NORETRY);
if (src) {
copy_page(src, buf);
@@ -364,7 +364,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
ret = hib_wait_io(hb); /* Free pages */
if (ret)
return ret;
- src = (void *)__get_free_page(__GFP_WAIT |
+ src = (void *)__get_free_page(__GFP_RECLAIM |
__GFP_NOWARN |
__GFP_NORETRY);
if (src) {
@@ -672,7 +672,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
- page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+ page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
if (!page) {
printk(KERN_ERR "PM: Failed to allocate LZO page\n");
ret = -ENOMEM;
@@ -975,7 +975,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
last = tmp;
tmp->map = (struct swap_map_page *)
- __get_free_page(__GFP_WAIT | __GFP_HIGH);
+ __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
if (!tmp->map) {
release_swap_reader(handle);
return -ENOMEM;
@@ -1242,9 +1242,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
for (i = 0; i < read_pages; i++) {
page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
- __GFP_WAIT | __GFP_HIGH :
- __GFP_WAIT | __GFP_NOWARN |
- __GFP_NORETRY);
+ __GFP_RECLAIM | __GFP_HIGH :
+ __GFP_RECLAIM | __GFP_NOWARN |
+ __GFP_NORETRY);
if (!page[i]) {
if (i < LZO_CMP_PAGES) {
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b16f354..2ce8826 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -269,6 +269,9 @@ static u32 clear_idx;
#define PREFIX_MAX 32
#define LOG_LINE_MAX (1024 - PREFIX_MAX)
+#define LOG_LEVEL(v) ((v) & 0x07)
+#define LOG_FACILITY(v) ((v) >> 3 & 0xff)
+
/* record buffer */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define LOG_ALIGN 4
@@ -612,7 +615,6 @@ struct devkmsg_user {
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
char *buf, *line;
- int i;
int level = default_message_loglevel;
int facility = 1; /* LOG_USER */
size_t len = iov_iter_count(from);
@@ -642,12 +644,13 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
line = buf;
if (line[0] == '<') {
char *endp = NULL;
+ unsigned int u;
- i = simple_strtoul(line+1, &endp, 10);
+ u = simple_strtoul(line + 1, &endp, 10);
if (endp && endp[0] == '>') {
- level = i & 7;
- if (i >> 3)
- facility = i >> 3;
+ level = LOG_LEVEL(u);
+ if (LOG_FACILITY(u) != 0)
+ facility = LOG_FACILITY(u);
endp++;
len -= endp - line;
line = endp;
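Worked example of the new macros: writing "<13>message" to /dev/kmsg parses u = 13, so

    LOG_LEVEL(13)    = 13 & 0x07        = 5    /* KERN_NOTICE */
    LOG_FACILITY(13) = (13 >> 3) & 0xff = 1    /* LOG_USER, the default facility */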
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 824aa9f..f04fda8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2302,7 +2302,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
now = curr->se.sum_exec_runtime;
period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
- if (now - curr->node_stamp > period) {
+ if (now > curr->node_stamp + period) {
if (!curr->node_stamp)
curr->numa_scan_period = task_scan_min(curr);
curr->node_stamp += period;
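The rewritten comparison avoids an unsigned-underflow false positive: node_stamp is advanced in whole periods, so it can legitimately run ahead of now, and the old u64 subtraction then produced a huge wrapped value. Small-number illustration (values made up):

    /* now = 100, node_stamp = 150, period = 1000, all u64 */
    now - curr->node_stamp > period     /* (u64)(100 - 150) wraps -> true, spurious scan */
    now > curr->node_stamp + period     /* 100 > 1150 -> false, as intended */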
diff --git a/kernel/signal.c b/kernel/signal.c
index 0f6bbbe..f3f1f7a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -503,41 +503,6 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return !tsk->ptrace;
}
-/*
- * Notify the system that a driver wants to block all signals for this
- * process, and wants to be notified if any signals at all were to be
- * sent/acted upon. If the notifier routine returns non-zero, then the
- * signal will be acted upon after all. If the notifier routine returns 0,
- * then then signal will be blocked. Only one block per process is
- * allowed. priv is a pointer to private data that the notifier routine
- * can use to determine if the signal should be blocked or not.
- */
-void
-block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->notifier_mask = mask;
- current->notifier_data = priv;
- current->notifier = notifier;
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-
-/* Notify the system that blocking has ended. */
-
-void
-unblock_all_signals(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->notifier = NULL;
- current->notifier_data = NULL;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-}
-
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
struct sigqueue *q, *first = NULL;
@@ -580,19 +545,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
{
int sig = next_signal(pending, mask);
- if (sig) {
- if (current->notifier) {
- if (sigismember(current->notifier_mask, sig)) {
- if (!(current->notifier)(current->notifier_data)) {
- clear_thread_flag(TIF_SIGPENDING);
- return 0;
- }
- }
- }
-
+ if (sig)
collect_signal(sig, pending, info);
- }
-
return sig;
}
@@ -834,7 +788,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
sigset_t flush;
if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
- if (signal->flags & SIGNAL_GROUP_COREDUMP)
+ if (!(signal->flags & SIGNAL_GROUP_EXIT))
return sig == SIGKILL;
/*
* The process is in the middle of dying, nothing to do.
@@ -2483,9 +2437,6 @@ EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
-EXPORT_SYMBOL(block_all_signals);
-EXPORT_SYMBOL(unblock_all_signals);
-
/*
* System call entry points.
@@ -3552,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
#endif
-int sigsuspend(sigset_t *set)
+static int sigsuspend(sigset_t *set)
{
current->saved_sigmask = current->blocked;
set_current_blocked(set);
diff --git a/kernel/smp.c b/kernel/smp.c
index 0785447..d903c02 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -669,7 +669,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
cpumask_var_t cpus;
int cpu, ret;
- might_sleep_if(gfp_flags & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(gfp_flags));
if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
preempt_disable();
diff --git a/kernel/sys.c b/kernel/sys.c
index fa2f2f6..6af9212 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,7 +222,7 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
goto out_unlock; /* No processes for this user */
}
do_each_thread(g, p) {
- if (uid_eq(task_uid(p), uid))
+ if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
error = set_one_prio(p, niceval, error);
} while_each_thread(g, p);
if (!uid_eq(uid, cred->uid))
@@ -290,7 +290,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
goto out_unlock; /* No processes for this user */
}
do_each_thread(g, p) {
- if (uid_eq(task_uid(p), uid)) {
+ if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
niceval = nice_to_rlimit(task_nice(p));
if (niceval > retval)
retval = niceval;
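Both PRIO_USER loops now skip tasks with task_pid_vnr(p) == 0, i.e. tasks that have no pid mapping in the caller's pid namespace. Rough illustration of the predicate (this reading is inferred, not spelled out in the diff):

    /* task_pid_vnr(p) > 0  -> p is visible in current's pid namespace, adjust it   */
    /* task_pid_vnr(p) == 0 -> p is outside this namespace, leave it untouched      */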
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0d8fe8b..1347882 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
continue;
/* Check the deviation from the watchdog clocksource. */
- if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
+ if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
cs->name);
pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b1356b7..d563c19 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1614,7 +1614,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
negative = (tick_error < 0);
/* Sort out the magnitude of the correction */
- tick_error = abs64(tick_error);
+ tick_error = abs(tick_error);
for (adj = 0; tick_error > interval; adj++)
tick_error >>= 1;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 74591ba..bbc5d11 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -977,13 +977,29 @@ EXPORT_SYMBOL(add_timer);
*/
void add_timer_on(struct timer_list *timer, int cpu)
{
- struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
+ struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
+ struct tvec_base *base;
unsigned long flags;
timer_stats_timer_set_start_info(timer);
BUG_ON(timer_pending(timer) || !timer->function);
- spin_lock_irqsave(&base->lock, flags);
- timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
+
+ /*
+ * If @timer was on a different CPU, it should be migrated with the
+ * old base locked to prevent other operations proceeding with the
+ * wrong base locked. See lock_timer_base().
+ */
+ base = lock_timer_base(timer, &flags);
+ if (base != new_base) {
+ timer->flags |= TIMER_MIGRATING;
+
+ spin_unlock(&base->lock);
+ base = new_base;
+ spin_lock(&base->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | cpu);
+ }
+
debug_activate(timer, timer->expires);
internal_add_timer(base, timer);
spin_unlock_irqrestore(&base->lock, flags);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8d6363f..e45db6b 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -434,7 +434,7 @@ config UPROBE_EVENT
config BPF_EVENTS
depends on BPF_SYSCALL
- depends on KPROBE_EVENT || UPROBE_EVENT
+ depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
bool
default y
help
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 75f1d05..9c6045a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
- cpu_buffer->reader_page->read = 0;
-}
-
static void rb_inc_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
event = __rb_reserve_next(cpu_buffer, &info);
- if (unlikely(PTR_ERR(event) == -EAGAIN))
+ if (unlikely(PTR_ERR(event) == -EAGAIN)) {
+ if (info.add_timestamp)
+ info.length -= RB_LEN_TIME_EXTEND;
goto again;
+ }
if (!event)
goto out_fail;
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/* Finally update the reader page to the new head */
cpu_buffer->reader_page = reader;
- rb_reset_reader_page(cpu_buffer);
+ cpu_buffer->reader_page->read = 0;
if (overwrite != cpu_buffer->last_overrun) {
cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
goto again;
out:
+ /* Update the read_stamp on the first event */
+ if (reader && reader->read == 0)
+ cpu_buffer->read_stamp = reader->page->time_stamp;
+
arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2198a63..87fb980 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -100,8 +100,6 @@ static DEFINE_PER_CPU(bool, trace_cmdline_save);
*/
static int tracing_disabled = 1;
-DEFINE_PER_CPU(int, ftrace_cpu_disabled);
-
cpumask_var_t __read_mostly tracing_buffer_mask;
/*
@@ -1775,10 +1773,6 @@ trace_function(struct trace_array *tr,
struct ring_buffer_event *event;
struct ftrace_entry *entry;
- /* If we are reading the ring buffer, don't trace */
- if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
- return;
-
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
flags, pc);
if (!event)
@@ -4554,6 +4548,8 @@ out:
return ret;
}
+#ifdef CONFIG_TRACER_MAX_TRACE
+
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -4568,6 +4564,8 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
+#endif
+
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
struct trace_array *tr = inode->i_private;
@@ -5469,12 +5467,14 @@ static const struct file_operations tracing_thresh_fops = {
.llseek = generic_file_llseek,
};
+#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
};
+#endif
static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
@@ -6847,7 +6847,9 @@ struct dentry *tracing_init_dentry(void)
if (tr->dir)
return NULL;
- if (WARN_ON(!debugfs_initialized()))
+ if (WARN_ON(!tracefs_initialized()) ||
+ (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ WARN_ON(!debugfs_initialized())))
return ERR_PTR(-ENODEV);
/*
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dd76208..919d9d0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -667,7 +667,6 @@ extern int DYN_FTRACE_TEST_NAME2(void);
extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(int, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 92382af..a663cbb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -288,9 +288,6 @@ int __trace_graph_entry(struct trace_array *tr,
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
- if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
- return 0;
-
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
sizeof(*entry), flags, pc);
if (!event)
@@ -403,9 +400,6 @@ void __trace_graph_return(struct trace_array *tr,
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
- if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
- return;
-
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
sizeof(*entry), flags, pc);
if (!event)