From 4545c89880138b30a868159bc1b209867b8a5f32 Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Mon, 27 Apr 2015 13:17:19 +0200
Subject: x86: introduce kaslr_offset()

The offset that has been chosen for KASLR during kernel decompression
can easily be computed as the difference between _text and
__START_KERNEL. We are already making use of this in the
dump_kernel_offset() notifier and in arch_crash_save_vmcoreinfo().

Introduce kaslr_offset() that performs this computation instead of
hard-coding it, so that other kernel code (such as live patching) can
make use of it. Also convert the existing users to it.

This patch is an equivalent transformation with no effect on the
resulting code:

  $ diff -u vmlinux.old.asm vmlinux.new.asm
  --- vmlinux.old.asm	2015-04-28 17:55:19.520983368 +0200
  +++ vmlinux.new.asm	2015-04-28 17:55:24.141206072 +0200
  @@ -1,5 +1,5 @@
  -vmlinux.old:     file format elf64-x86-64
  +vmlinux.new:     file format elf64-x86-64

   Disassembly of section .text:
  $

Acked-by: Borislav Petkov
Signed-off-by: Jiri Kosina

diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f69e06b..785ac2f 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -65,12 +65,18 @@ static inline void x86_ce4100_early_setup(void) { }
  * This is set up by the setup-routine at boot-time
  */
 extern struct boot_params boot_params;
+extern char _text[];
 
 static inline bool kaslr_enabled(void)
 {
 	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
 }
 
+static inline unsigned long kaslr_offset(void)
+{
+	return (unsigned long)&_text - __START_KERNEL;
+}
+
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 415480d..e102963 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <asm/setup.h>
 
 #ifdef CONFIG_KEXEC_FILE
 static struct kexec_file_ops *kexec_file_loaders[] = {
@@ -334,7 +335,7 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
-			      (unsigned long)&_text - __START_KERNEL);
+			      kaslr_offset());
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d74ac33..5056d3c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -834,7 +834,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
 	if (kaslr_enabled()) {
 		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-			 (unsigned long)&_text - __START_KERNEL,
+			 kaslr_offset(),
 			 __START_KERNEL,
 			 __START_KERNEL_map, MODULES_VADDR-1);
-- cgit v0.10.2
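For illustration, a consumer such as live patching could use the new helper
to translate a link-time address from the vmlinux symbol table into the
address the randomized kernel actually runs at. This is a hypothetical
sketch, not part of the patch; runtime_addr() is an invented name:

	/* Hypothetical example; assumes <asm/setup.h> is included. */
	static unsigned long runtime_addr(unsigned long link_time_addr)
	{
		/* Base not randomized: vmlinux addresses are accurate. */
		if (!kaslr_enabled())
			return link_time_addr;

		/* Shift by the same offset the decompressor chose. */
		return link_time_addr + kaslr_offset();
	}
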
From 5d4351ba654c2f25eb4f6883db742a16bccbb36b Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Mon, 27 Apr 2015 13:25:23 +0200
Subject: livepatch: x86: make kASLR logic more accurate

We give up the old_addr hint from the coming patch module in cases when
the kernel load base has been randomized (as in such a case, the coming
module has no idea about the exact randomization offset).

We are currently too pessimistic, and give up immediately as soon as
CONFIG_RANDOMIZE_BASE is set; this however doesn't directly imply that
the load base has actually been randomized. There are config options
that disable kASLR (such as hibernation), the user could have disabled
KASLR on the kernel command line, etc.

The boot loader propagates the information whether the kernel has been
randomized through bootparams. This allows us to make the condition
more accurate.

On top of that, it seems unnecessary to give up old_addr hints even if
randomization is active. The relocation offset can be computed using
kaslr_offset(), and old_addr can therefore be adjusted accordingly.

Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina

diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 2d29197..19c099a 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -21,6 +21,7 @@
 #ifndef _ASM_X86_LIVEPATCH_H
 #define _ASM_X86_LIVEPATCH_H
 
+#include <asm/setup.h>
 #include
 #include
 
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 284e269..0e7c23c 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -234,8 +234,9 @@ static int klp_find_verify_func_addr(struct klp_object *obj,
 	int ret;
 
 #if defined(CONFIG_RANDOMIZE_BASE)
-	/* KASLR is enabled, disregard old_addr from user */
-	func->old_addr = 0;
+	/* If KASLR has been enabled, adjust old_addr accordingly */
+	if (kaslr_enabled() && func->old_addr)
+		func->old_addr += kaslr_offset();
 #endif
 
 	if (!func->old_addr || klp_is_module(obj))
-- cgit v0.10.2

From 535b3ddc285825c058cef3436a9aa207edffa6cd Mon Sep 17 00:00:00 2001
From: Jiri Kosina
Date: Wed, 29 Apr 2015 18:09:53 +0200
Subject: x86: kaslr: fix build due to missing ALIGN definition

Fengguang's bot reported that 4545c898 ("x86: introduce kaslr_offset()")
broke a randconfig build:

  In file included from arch/x86/xen/vga.c:5:0:
  arch/x86/include/asm/setup.h: In function 'kaslr_offset':
  >> arch/x86/include/asm/setup.h:77:2: error: implicit declaration of function 'ALIGN' [-Werror=implicit-function-declaration]
     return (unsigned long)&_text - __START_KERNEL;
     ^

Fix that by making setup.h self-sufficient: explicitly include
linux/kernel.h, which is needed for ALIGN() (which is what
__START_KERNEL contains in its expansion).

Reported-by: fengguang.wu@intel.com
Signed-off-by: Jiri Kosina

diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 785ac2f..11af24e 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -60,6 +60,7 @@ static inline void x86_ce4100_early_setup(void) { }
 #ifndef _SETUP
 
 #include
+#include <linux/kernel.h>
 
 /*
  * This is set up by the setup-routine at boot-time
-- cgit v0.10.2
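For context on why ALIGN() appears in the expansion at all: __START_KERNEL
is built up roughly as follows in the x86 headers of that era
(arch/x86/include/asm/page_types.h and page_64_types.h, quoted from memory
and slightly simplified):

	#define __PHYSICAL_START	ALIGN(CONFIG_PHYSICAL_START, \
					      CONFIG_PHYSICAL_ALIGN)
	#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)

So any translation unit that evaluates kaslr_offset() also needs ALIGN()
from linux/kernel.h, which setup.h previously picked up only by chance.
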
From e76ff06a959336fae64b53c36ec60940ca6ef04f Mon Sep 17 00:00:00 2001
From: Nicholas Mc Guire
Date: Mon, 11 May 2015 07:52:29 +0200
Subject: livepatch: match return value to function signature

klp_initialized() should return bool but is actually returning
a struct kobject * - convert it to a boolean explicitly.

Signed-off-by: Nicholas Mc Guire
Reviewed-by: Jiri Slaby
Signed-off-by: Jiri Kosina

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 284e269..c5e631cd 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -128,7 +128,7 @@ static bool klp_is_patch_registered(struct klp_patch *patch)
 
 static bool klp_initialized(void)
 {
-	return klp_root_kobj;
+	return !!klp_root_kobj;
 }
 
 struct klp_find_arg {
-- cgit v0.10.2

From 36e505c16e610403c110bab76a95cbfa0436a928 Mon Sep 17 00:00:00 2001
From: Minfei Huang
Date: Fri, 15 May 2015 10:22:48 +0800
Subject: livepatch: Prevent patch inconsistencies if the coming module notifier fails

Previously registered patches are applied as soon as the corresponding
module is loaded. In general, the patch will do relocation (if
necessary) and obtain/verify the function addresses before we start to
enable the patch.

There are three different situations in which the coming module
notifier can fail:

1) Relocations are not applied for some reason. In this case the
   kallsyms lookup for the module symbols is not called at all and the
   patch is not applied to the module. If the user disables and enables
   the patch again, there is a possible bug in klp_enable_func(). If
   the user specified func->old_addr for some function in the module
   (and he shouldn't do that, but nevertheless), our warning would not
   catch it; ftrace would either refuse to register the handler because
   of the wrong address, or would register the handler for a wrong
   address.

2) Relocations are applied successfully, but the kallsyms lookup fails.
   In this case func->old_addr can be correct for all previous lookups,
   0 for the current failed one, and "unspecified" for the rest. If we
   undergo the same scenario as in 1), the behaviour differs for the
   three cases, but the patch is not enabled anyway.

3) The object is initialized, but klp_enable_object() fails in the
   notifier due to a possible ftrace error. Since it is improbable that
   ftrace would heal itself in the future, we would get those errors
   every time the patch is enabled.

In order to fix the above situations, we can set obj->mod to NULL if
the coming module notifier fails.

Signed-off-by: Minfei Huang
Acked-by: Josh Poimboeuf
Reviewed-by: Miroslav Benes
Signed-off-by: Jiri Kosina

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c5e631cd..c03c046 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -883,7 +883,7 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
-static void klp_module_notify_coming(struct klp_patch *patch,
+static int klp_module_notify_coming(struct klp_patch *patch,
 				     struct klp_object *obj)
 {
 	struct module *pmod = patch->mod;
@@ -891,22 +891,23 @@ static void klp_module_notify_coming(struct klp_patch *patch,
 	int ret;
 
 	ret = klp_init_object_loaded(patch, obj);
-	if (ret)
-		goto err;
+	if (ret) {
+		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
+			pmod->name, mod->name, ret);
+		return ret;
+	}
 
 	if (patch->state == KLP_DISABLED)
-		return;
+		return 0;
 
 	pr_notice("applying patch '%s' to loading module '%s'\n",
 		  pmod->name, mod->name);
 
 	ret = klp_enable_object(obj);
-	if (!ret)
-		return;
-
-err:
-	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
-		pmod->name, mod->name, ret);
+	if (ret)
+		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+			pmod->name, mod->name, ret);
+	return ret;
 }
 
 static void klp_module_notify_going(struct klp_patch *patch,
@@ -930,6 +931,7 @@ disabled:
 static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 			     void *data)
 {
+	int ret;
 	struct module *mod = data;
 	struct klp_patch *patch;
 	struct klp_object *obj;
@@ -955,7 +957,12 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 
 		if (action == MODULE_STATE_COMING) {
 			obj->mod = mod;
-			klp_module_notify_coming(patch, obj);
+			ret = klp_module_notify_coming(patch, obj);
+			if (ret) {
+				obj->mod = NULL;
+				pr_warn("patch '%s' is in an inconsistent state!\n",
+					patch->mod->name);
+			}
 		} else /* MODULE_STATE_GOING */
 			klp_module_notify_going(patch, obj);
-- cgit v0.10.2
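Clearing obj->mod works because everything else keys off that pointer: the
object is simply treated as not loaded again, so none of the stale or
partially initialized func->old_addr values can be acted upon until a fresh
module load redoes the initialization. For reference, the consuming check in
include/linux/livepatch.h reads essentially as follows around this time
(quoted from memory of the tree):

	static inline bool klp_is_object_loaded(struct klp_object *obj)
	{
		/* vmlinux has no name; a module object needs obj->mod set */
		return !obj->name || obj->mod;
	}
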
From cad706df7e4a00a595f2662f32c0fc174aa4e61f Mon Sep 17 00:00:00 2001
From: Miroslav Benes
Date: Tue, 19 May 2015 12:01:18 +0200
Subject: livepatch: make kobject in klp_object statically allocated

Make the kobj variable (of type struct kobject) statically allocated
in the klp_object structure. This will allow us to move through the
func-object-patch hierarchy via kobject links.

The only reason to have it dynamically allocated was to avoid an empty
release callback in the code. However, we now have empty callbacks for
the function and patch kobjects anyway, so that reason is no longer
valid and the advantage of static allocation is clear.

Signed-off-by: Miroslav Benes
Signed-off-by: Jiri Slaby
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index ee6dbb3..fe45f2f 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
 	struct klp_func *funcs;
 
 	/* internal */
-	struct kobject *kobj;
+	struct kobject kobj;
 	struct module *mod;
 	enum klp_state state;
 };
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c03c046..e997782 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -651,6 +651,15 @@ static struct kobj_type klp_ktype_patch = {
 	.default_attrs = klp_patch_attrs,
 };
 
+static void klp_kobj_release_object(struct kobject *kobj)
+{
+}
+
+static struct kobj_type klp_ktype_object = {
+	.release = klp_kobj_release_object,
+	.sysfs_ops = &kobj_sysfs_ops,
+};
+
 static void klp_kobj_release_func(struct kobject *kobj)
 {
 }
@@ -695,7 +704,7 @@ static void klp_free_objects_limited(struct klp_patch *patch,
 	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
 		klp_free_funcs_limited(obj, NULL);
-		kobject_put(obj->kobj);
+		kobject_put(&obj->kobj);
 	}
 }
 
@@ -713,7 +722,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 	func->state = KLP_DISABLED;
 
 	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-				    obj->kobj, "%s", func->old_name);
+				    &obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -753,9 +762,10 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	klp_find_object_module(obj);
 
 	name = klp_is_module(obj) ? obj->name : "vmlinux";
-	obj->kobj = kobject_create_and_add(name, &patch->kobj);
-	if (!obj->kobj)
-		return -ENOMEM;
+	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
+				   &patch->kobj, "%s", name);
+	if (ret)
+		return ret;
 
 	for (func = obj->funcs; func->old_name; func++) {
 		ret = klp_init_func(obj, func);
@@ -773,7 +783,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 
 free:
 	klp_free_funcs_limited(obj, func);
-	kobject_put(obj->kobj);
+	kobject_put(&obj->kobj);
 	return ret;
 }
-- cgit v0.10.2
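The cost of the static allocation is that an embedded kobject needs a ktype
with a release callback passed to kobject_init_and_add(), hence the new
empty klp_kobj_release_object(). A sketch of the general embedded-kobject
pattern, with a hypothetical container type (not livepatch code):

	struct foo {
		struct kobject kobj;	/* embedded, not a pointer */
		int payload;
	};

	static void foo_release(struct kobject *kobj)
	{
		/*
		 * Usually: kfree(container_of(kobj, struct foo, kobj));
		 * livepatch can leave this empty because the enclosing
		 * structure lives in the patch module itself.
		 */
	}
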
From 8cdd043ab32c2ff28d2a77c514a768a9edce244c Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Tue, 19 May 2015 12:01:19 +0200
Subject: livepatch: introduce patch/func-walking helpers

klp_for_each_object() and klp_for_each_func() are now used all over the
code. One no longer needs to think about what the proper termination
condition of the for loop is.

Signed-off-by: Jiri Slaby
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fe45f2f..31db7a0 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -123,6 +123,12 @@ struct klp_patch {
 	enum klp_state state;
 };
 
+#define klp_for_each_object(patch, obj) \
+	for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+	for (func = obj->funcs; func->old_name; func++)
+
 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index e997782..c38398e 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -422,7 +422,7 @@ static void klp_disable_object(struct klp_object *obj)
 {
 	struct klp_func *func;
 
-	for (func = obj->funcs; func->old_name; func++)
+	klp_for_each_func(obj, func)
 		if (func->state == KLP_ENABLED)
 			klp_disable_func(func);
 
@@ -440,7 +440,7 @@ static int klp_enable_object(struct klp_object *obj)
 	if (WARN_ON(!klp_is_object_loaded(obj)))
 		return -EINVAL;
 
-	for (func = obj->funcs; func->old_name; func++) {
+	klp_for_each_func(obj, func) {
 		ret = klp_enable_func(func);
 		if (ret) {
 			klp_disable_object(obj);
@@ -463,7 +463,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 
 	pr_notice("disabling patch '%s'\n", patch->mod->name);
 
-	for (obj = patch->objs; obj->funcs; obj++) {
+	klp_for_each_object(patch, obj) {
 		if (obj->state == KLP_ENABLED)
 			klp_disable_object(obj);
 	}
@@ -523,7 +523,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
 
 	pr_notice("enabling patch '%s'\n", patch->mod->name);
 
-	for (obj = patch->objs; obj->funcs; obj++) {
+	klp_for_each_object(patch, obj) {
 		if (!klp_is_object_loaded(obj))
 			continue;
 
@@ -689,7 +689,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
 
 	obj->mod = NULL;
 
-	for (func = obj->funcs; func->old_name; func++)
+	klp_for_each_func(obj, func)
 		func->old_addr = 0;
 }
 
@@ -738,7 +738,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 			return ret;
 	}
 
-	for (func = obj->funcs; func->old_name; func++) {
+	klp_for_each_func(obj, func) {
 		ret = klp_find_verify_func_addr(obj, func);
 		if (ret)
 			return ret;
@@ -767,7 +767,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	if (ret)
 		return ret;
 
-	for (func = obj->funcs; func->old_name; func++) {
+	klp_for_each_func(obj, func) {
 		ret = klp_init_func(obj, func);
 		if (ret)
 			goto free;
@@ -804,7 +804,7 @@ static int klp_init_patch(struct klp_patch *patch)
 	if (ret)
 		goto unlock;
 
-	for (obj = patch->objs; obj->funcs; obj++) {
+	klp_for_each_object(patch, obj) {
 		ret = klp_init_object(patch, obj);
 		if (ret)
 			goto free;
@@ -961,7 +961,7 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 		mod->klp_alive = false;
 
 	list_for_each_entry(patch, &klp_patches, list) {
-		for (obj = patch->objs; obj->funcs; obj++) {
+		klp_for_each_object(patch, obj) {
 			if (!klp_is_module(obj) ||
 			    strcmp(obj->name, mod->name))
 				continue;
-- cgit v0.10.2
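As a usage illustration, a hypothetical helper that counts every function
touched by a patch now reads naturally with the new macros
(klp_count_funcs() is invented for this example):

	static int klp_count_funcs(struct klp_patch *patch)
	{
		struct klp_object *obj;
		struct klp_func *func;
		int count = 0;

		klp_for_each_object(patch, obj)
			klp_for_each_func(obj, func)
				count++;

		return count;
	}
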
From 26029d88ad1ba6ad7d1f16f22dc67aa8eac0d391 Mon Sep 17 00:00:00 2001
From: Minfei Huang
Date: Fri, 22 May 2015 22:26:29 +0800
Subject: livepatch: annotate klp_init() with __init

A module_init() function should be marked __init.

[jkosina@suse.cz: remove overly verbose changelog]
Signed-off-by: Minfei Huang
Acked-by: Josh Poimboeuf
Signed-off-by: Jiri Kosina

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c38398e..a0acda4 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -990,7 +990,7 @@ static struct notifier_block klp_module_nb = {
 	.priority = INT_MIN+1,	/* called late but before ftrace notifier */
 };
 
-static int klp_init(void)
+static int __init klp_init(void)
 {
 	int ret;
 
-- cgit v0.10.2

From 9a1bd63cdae4b623494c4ebaf723a91c35ec49fb Mon Sep 17 00:00:00 2001
From: Miroslav Benes
Date: Mon, 1 Jun 2015 17:48:37 +0200
Subject: livepatch: add module locking around kallsyms calls

The list of loaded modules is walked through in
module_kallsyms_on_each_symbol (called by kallsyms_on_each_symbol). The
module_mutex lock should be acquired to prevent potential corruption
of the list.

This was uncovered with new lockdep asserts in module code introduced
by commit 0be964be0d45 ("module: Sanitize RCU usage and locking") in
recent linux-next trees.

Signed-off-by: Miroslav Benes
Acked-by: Josh Poimboeuf
Cc: stable@vger.kernel.org
Signed-off-by: Jiri Kosina

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 284e269..9ec5557 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -179,7 +179,9 @@ static int klp_find_object_symbol(const char *objname, const char *name,
 		.count = 0
 	};
 
+	mutex_lock(&module_mutex);
 	kallsyms_on_each_symbol(klp_find_callback, &args);
+	mutex_unlock(&module_mutex);
 
 	if (args.count == 0)
 		pr_err("symbol '%s' not found in symbol table\n", name);
@@ -219,13 +221,19 @@ static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
 		.name = name,
 		.addr = addr,
 	};
+	int ret;
 
-	if (kallsyms_on_each_symbol(klp_verify_callback, &args))
-		return 0;
+	mutex_lock(&module_mutex);
+	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
+	mutex_unlock(&module_mutex);
 
-	pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
-	       name, addr);
-	return -EINVAL;
+	if (!ret) {
+		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
+		       name, addr);
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static int klp_find_verify_func_addr(struct klp_object *obj,
-- cgit v0.10.2
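The rule established here applies to any caller of
kallsyms_on_each_symbol(), since the walk traverses the module list. A
minimal hypothetical caller following the same pattern (count_symbol_cb()
and count_all_symbols() are invented names; the callback signature is as of
this era of the tree):

	static int count_symbol_cb(void *data, const char *name,
				   struct module *mod, unsigned long addr)
	{
		(*(unsigned long *)data)++;
		return 0;	/* returning non-zero stops the walk */
	}

	static unsigned long count_all_symbols(void)
	{
		unsigned long count = 0;

		mutex_lock(&module_mutex);
		kallsyms_on_each_symbol(count_symbol_cb, &count);
		mutex_unlock(&module_mutex);

		return count;
	}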