Diffstat (limited to 'kernel')
-rw-r--r--   kernel/fork.c                14
-rw-r--r--   kernel/module.c               2
-rw-r--r--   kernel/printk.c              33
-rw-r--r--   kernel/signal.c              11
-rw-r--r--   kernel/softirq.c             11
-rw-r--r--   kernel/sysctl.c              12
-rw-r--r--   kernel/trace/Kconfig         10
-rw-r--r--   kernel/trace/ring_buffer.c    3
-rw-r--r--   kernel/trace/trace.c         23
-rw-r--r--   kernel/user.c                67
10 files changed, 121 insertions, 65 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 4430eb1..be022c2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages)
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
- ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
/* do the arch specific task caches init */
@@ -1470,20 +1470,20 @@ void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
- sighand_ctor);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+ SLAB_NOTRACK, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
diff --git a/kernel/module.c b/kernel/module.c
index e4ab36c..215aaab 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2899,7 +2899,7 @@ void print_modules(void)
struct module *mod;
char buf[8];
- printk("Modules linked in:");
+ printk(KERN_DEFAULT "Modules linked in:");
/* Most callers should already have preempt disabled, but make sure */
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list)
diff --git a/kernel/printk.c b/kernel/printk.c
index 5052b54..b4d97b5 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -687,20 +687,35 @@ asmlinkage int vprintk(const char *fmt, va_list args)
sizeof(printk_buf) - printed_len, fmt, args);
+ p = printk_buf;
+
+ /* Do we have a loglevel in the string? */
+ if (p[0] == '<') {
+ unsigned char c = p[1];
+ if (c && p[2] == '>') {
+ switch (c) {
+ case '0' ... '7': /* loglevel */
+ current_log_level = c - '0';
+ /* Fallthrough - make sure we're on a new line */
+ case 'd': /* KERN_DEFAULT */
+ if (!new_text_line) {
+ emit_log_char('\n');
+ new_text_line = 1;
+ }
+ /* Fallthrough - skip the loglevel */
+ case 'c': /* KERN_CONT */
+ p += 3;
+ break;
+ }
+ }
+ }
+
/*
* Copy the output into log_buf. If the caller didn't provide
* appropriate log level tags, we insert them here
*/
- for (p = printk_buf; *p; p++) {
+ for ( ; *p; p++) {
if (new_text_line) {
- /* If a token, set current_log_level and skip over */
- if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
- p[2] == '>') {
- current_log_level = p[1] - '0';
- p += 3;
- printed_len -= 3;
- }
-
/* Always output the token */
emit_log_char('<');
emit_log_char(current_log_level + '0');
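
[Editor's note: the hunk above hoists the loglevel-token handling out of the copy loop and also teaches vprintk() to recognize the KERN_DEFAULT ("<d>") and KERN_CONT ("<c>") prefixes. The following is a minimal stand-alone userspace model of the same parsing decisions, purely for illustration; parse_level_prefix() and its callers are invented names, not kernel API.]

#include <stdio.h>

/* Illustrative model of the prefix handling added above: return how many
 * prefix characters to skip, report the parsed level (-1 = keep the current
 * level) and whether a newline must be forced before emitting the message. */
static int parse_level_prefix(const char *p, int *level, int *force_newline)
{
	*level = -1;
	*force_newline = 0;

	if (p[0] != '<' || p[1] == '\0' || p[2] != '>')
		return 0;

	switch (p[1]) {
	case '0' ... '7':		/* explicit loglevel */
		*level = p[1] - '0';
		/* fall through */
	case 'd':			/* KERN_DEFAULT: default level, new line */
		*force_newline = 1;
		/* fall through */
	case 'c':			/* KERN_CONT: continue the current line */
		return 3;
	}
	return 0;
}

int main(void)
{
	const char *samples[] = { "<3>oops", "<d>fresh line", "<c>continued", "plain" };
	int i;

	for (i = 0; i < 4; i++) {
		int level, nl;
		int skip = parse_level_prefix(samples[i], &level, &nl);

		printf("%-16s skip=%d level=%d force_newline=%d\n",
		       samples[i], skip, level, nl);
	}
	return 0;
}
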
diff --git a/kernel/signal.c b/kernel/signal.c
index 809a228..d81f495 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -832,6 +832,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
{
struct sigpending *pending;
struct sigqueue *q;
+ int override_rlimit;
trace_sched_signal_send(sig, t);
@@ -863,9 +864,13 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
make sure at least one signal gets delivered and don't
pass on the info struct. */
- q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
- (is_si_special(info) ||
- info->si_code >= 0)));
+ if (sig < SIGRTMIN)
+ override_rlimit = (is_si_special(info) || info->si_code >= 0);
+ else
+ override_rlimit = 0;
+
+ q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+ override_rlimit);
if (q) {
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 258885a..b41fb71 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -382,6 +382,17 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
EXPORT_SYMBOL(__tasklet_hi_schedule);
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+ BUG_ON(!irqs_disabled());
+
+ t->next = __get_cpu_var(tasklet_hi_vec).head;
+ __get_cpu_var(tasklet_hi_vec).head = t;
+ __raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
static void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
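
[Editor's note: the new export only performs the raw head-insertion; it expects the caller to already run with interrupts disabled and to have claimed TASKLET_STATE_SCHED. A hypothetical driver-side usage sketch follows; the driver names are invented, and the tasklet_hi_schedule_first() wrapper is assumed to be the <linux/interrupt.h> counterpart added alongside this export.]

#include <linux/interrupt.h>

/* Hypothetical driver sketch: queue a tasklet so it runs ahead of any
 * hi-tasklets already pending on this CPU. */
static void my_rx_tasklet(unsigned long data)
{
	/* runs in HI_SOFTIRQ context, before tasklets queued behind it */
}

static DECLARE_TASKLET(my_tasklet, my_rx_tasklet, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* hard-irq context: interrupts are disabled here, which satisfies
	 * the BUG_ON(!irqs_disabled()) in __tasklet_hi_schedule_first() */
	tasklet_hi_schedule_first(&my_tasklet);
	return IRQ_HANDLED;
}
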
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2ccee08..ab462b9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
+#include <linux/kmemcheck.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -967,6 +968,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_KMEMCHECK
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "kmemcheck",
+ .data = &kmemcheck_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
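
[Editor's note: with the table entry above, the kmemcheck_enabled flag becomes reachable at runtime as /proc/sys/kernel/kmemcheck. The sketch below is purely illustrative; it just writes to the proc file that this entry creates, assuming 0 disables and a non-zero value enables tracking.]

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kmemcheck", "w");

	if (!f) {
		perror("kmemcheck sysctl not available");
		return 1;
	}
	fputs("0\n", f);	/* turn kmemcheck tracking off at runtime */
	fclose(f);
	return 0;
}

Equivalently, from a shell: echo 0 > /proc/sys/kernel/kmemcheck.
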
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4a13e5a..61071fe 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -147,7 +147,7 @@ config IRQSOFF_TRACER
disabled by default and can be runtime (re-)started
via:
- echo 0 > /debugfs/tracing/tracing_max_latency
+ echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
(Note that kernel size and overhead increases with this option
enabled. This option and the preempt-off timing option can be
@@ -168,7 +168,7 @@ config PREEMPT_TRACER
disabled by default and can be runtime (re-)started
via:
- echo 0 > /debugfs/tracing/tracing_max_latency
+ echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
(Note that kernel size and overhead increases with this option
enabled. This option and the irqs-off timing option can be
@@ -261,7 +261,7 @@ config PROFILE_ANNOTATED_BRANCHES
This tracer profiles all the likely and unlikely macros
in the kernel. It will display the results in:
- /debugfs/tracing/profile_annotated_branch
+ /sys/kernel/debug/tracing/profile_annotated_branch
Note: this will add a significant overhead, only turn this
on if you need to profile the system's use of these macros.
@@ -274,7 +274,7 @@ config PROFILE_ALL_BRANCHES
taken in the kernel is recorded whether it hit or miss.
The results will be displayed in:
- /debugfs/tracing/profile_branch
+ /sys/kernel/debug/tracing/profile_branch
This option also enables the likely/unlikely profiler.
@@ -323,7 +323,7 @@ config STACK_TRACER
select KALLSYMS
help
This special tracer records the maximum stack footprint of the
- kernel and displays it in debugfs/tracing/stack_trace.
+ kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2e642b2..dc4dc70 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
@@ -1270,6 +1271,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
if (tail < BUF_PAGE_SIZE) {
/* Mark the rest of the page with padding */
event = __rb_page_index(tail_page, tail);
+ kmemcheck_annotate_bitfield(event, bitfield);
rb_event_set_padding(event);
}
@@ -1327,6 +1329,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
return NULL;
event = __rb_page_index(tail_page, tail);
+ kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(event, type, length);
/* The passed in type is zero for DATA */
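
[Editor's note: the two annotations above exist because the ring-buffer event header is packed into C bitfields, which kmemcheck cannot track bit by bit; kmemcheck_annotate_bitfield() marks the whole declared group as initialized. The general pattern is sketched below with invented field names; see include/linux/ring_buffer.h and include/linux/kmemcheck.h for the real definitions.]

#include <linux/kmemcheck.h>
#include <linux/types.h>

/* Abridged sketch: declare the bitfield group in the struct, then mark it
 * initialized right after allocating or locating the object. */
struct example_event {
	kmemcheck_bitfield_begin(bitfield);
	u32 kind:2, extra:3, delta:27;	/* invented fields */
	kmemcheck_bitfield_end(bitfield);
};

static void example_init(struct example_event *event)
{
	/* tell kmemcheck the whole bitfield group is about to be written,
	 * so later reads of these bits are not reported as uninitialized */
	kmemcheck_annotate_bitfield(event, bitfield);
	event->kind = 0;
	event->extra = 0;
	event->delta = 0;
}
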
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8acd9b8..c1878bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock =
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
*/
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2414,21 +2414,20 @@ static const struct file_operations tracing_iter_fops = {
static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
- "# mkdir /debug\n"
- "# mount -t debugfs nodev /debug\n\n"
- "# cat /debug/tracing/available_tracers\n"
+ "# mount -t debugfs nodev /sys/kernel/debug\n\n"
+ "# cat /sys/kernel/debug/tracing/available_tracers\n"
"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
- "# cat /debug/tracing/current_tracer\n"
+ "# cat /sys/kernel/debug/tracing/current_tracer\n"
"nop\n"
- "# echo sched_switch > /debug/tracing/current_tracer\n"
- "# cat /debug/tracing/current_tracer\n"
+ "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+ "# cat /sys/kernel/debug/tracing/current_tracer\n"
"sched_switch\n"
- "# cat /debug/tracing/trace_options\n"
+ "# cat /sys/kernel/debug/tracing/trace_options\n"
"noprint-parent nosym-offset nosym-addr noverbose\n"
- "# echo print-parent > /debug/tracing/trace_options\n"
- "# echo 1 > /debug/tracing/tracing_enabled\n"
- "# cat /debug/tracing/trace > /tmp/trace.txt\n"
- "# echo 0 > /debug/tracing/tracing_enabled\n"
+ "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+ "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+ "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+ "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
;
static ssize_t
diff --git a/kernel/user.c b/kernel/user.c
index 850e0ba..2c000e7 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -75,21 +75,6 @@ static void uid_hash_remove(struct user_struct *up)
put_user_ns(up->user_ns);
}
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
- struct user_struct *user;
- struct hlist_node *h;
-
- hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if (user->uid == uid) {
- atomic_inc(&user->__count);
- return user;
- }
- }
-
- return NULL;
-}
-
#ifdef CONFIG_USER_SCHED
static void sched_destroy_user(struct user_struct *up)
@@ -119,6 +104,23 @@ static int sched_create_user(struct user_struct *up) { return 0; }
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ if (user->uid == uid) {
+ /* possibly resurrect an "almost deleted" object */
+ if (atomic_inc_return(&user->__count) == 1)
+ cancel_delayed_work(&user->work);
+ return user;
+ }
+ }
+
+ return NULL;
+}
+
static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);
@@ -283,12 +285,12 @@ int __init uids_sysfs_init(void)
return uids_user_create(&root_user);
}
-/* work function to remove sysfs directory for a user and free up
+/* delayed work function to remove sysfs directory for a user and free up
* corresponding structures.
*/
static void cleanup_user_struct(struct work_struct *w)
{
- struct user_struct *up = container_of(w, struct user_struct, work);
+ struct user_struct *up = container_of(w, struct user_struct, work.work);
unsigned long flags;
int remove_user = 0;
@@ -297,15 +299,12 @@ static void cleanup_user_struct(struct work_struct *w)
*/
uids_mutex_lock();
- local_irq_save(flags);
-
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+ spin_lock_irqsave(&uidhash_lock, flags);
+ if (atomic_read(&up->__count) == 0) {
uid_hash_remove(up);
remove_user = 1;
- spin_unlock_irqrestore(&uidhash_lock, flags);
- } else {
- local_irq_restore(flags);
}
+ spin_unlock_irqrestore(&uidhash_lock, flags);
if (!remove_user)
goto done;
@@ -331,16 +330,28 @@ done:
*/
static void free_user(struct user_struct *up, unsigned long flags)
{
- /* restore back the count */
- atomic_inc(&up->__count);
spin_unlock_irqrestore(&uidhash_lock, flags);
-
- INIT_WORK(&up->work, cleanup_user_struct);
- schedule_work(&up->work);
+ INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
+ schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
}
#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ if (user->uid == uid) {
+ atomic_inc(&user->__count);
+ return user;
+ }
+ }
+
+ return NULL;
+}
+
int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
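
[Editor's note: taken together, the user.c hunks switch cleanup to delayed work and let uid_hash_find() resurrect a user_struct whose count has just hit zero by cancelling the pending cleanup. Below is a stripped-down sketch of that lifecycle pattern on a generic object; every name is invented for illustration, and the real locking lives in uidhash_lock and uids_mutex.]

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/atomic.h>		/* <linux/atomic.h> on newer trees */

/* Invented example type -- not kernel code. The lookup-side caller is
 * assumed to hold obj_lock, just as find_user() holds uidhash_lock. */
struct obj {
	atomic_t		count;
	struct delayed_work	work;
};

static DEFINE_SPINLOCK(obj_lock);

static void obj_cleanup(struct work_struct *w)
{
	struct obj *o = container_of(w, struct obj, work.work);
	unsigned long flags;
	int dead = 0;

	spin_lock_irqsave(&obj_lock, flags);
	if (atomic_read(&o->count) == 0)	/* nobody resurrected it */
		dead = 1;
	spin_unlock_irqrestore(&obj_lock, flags);

	if (dead)
		kfree(o);
}

/* lookup path, called with obj_lock held */
static struct obj *obj_get_locked(struct obj *o)
{
	/* going from 0 back to 1 means we raced with the last put:
	 * cancel the pending delayed free instead of losing the object */
	if (atomic_inc_return(&o->count) == 1)
		cancel_delayed_work(&o->work);
	return o;
}

static void obj_put(struct obj *o)
{
	unsigned long flags;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&o->count, &obj_lock)) {
		/* count hit zero: defer the real teardown so a concurrent
		 * lookup still has a window in which to resurrect us */
		spin_unlock_irqrestore(&obj_lock, flags);
		INIT_DELAYED_WORK(&o->work, obj_cleanup);
		schedule_delayed_work(&o->work, msecs_to_jiffies(1000));
	} else {
		local_irq_restore(flags);
	}
}
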