From 003002e04ed38618fc37b92ba128f5ca79d39f4f Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 5 Jun 2013 12:12:16 +0900
Subject: kprobes: Fix arch_prepare_kprobe to handle copy insn failures

Fix arch_prepare_kprobe() to handle failures in instruction copying
correctly. This is related to the previous fix 8101376, which made
__copy_instruction() return an error result on failure, but the caller
site was not updated to handle it. Thus, this is the other half of the
bugfix.

This fix is also related to the following bug-report:

   https://bugzilla.redhat.com/show_bug.cgi?id=910649

Signed-off-by: Masami Hiramatsu
Acked-by: Steven Rostedt
Tested-by: Jonathan Lebon
Cc: Frank Ch. Eigler
Cc: systemtap@sourceware.org
Cc: yrl.pp-manager.tt@hitachi.com
Link: http://lkml.kernel.org/r/20130605031216.15285.2001.stgit@mhiramat-M0-7522
Signed-off-by: Ingo Molnar

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9895a9a..211bce4 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
         return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+        int ret;
+
         /* Copy an instruction with recovering if other optprobe modifies it.*/
-        __copy_instruction(p->ainsn.insn, p->addr);
+        ret = __copy_instruction(p->ainsn.insn, p->addr);
+        if (!ret)
+                return -EINVAL;
 
         /*
          * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 
         /* Also, displacement change doesn't affect the first byte */
         p->opcode = p->ainsn.insn[0];
+
+        return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
         p->ainsn.insn = get_insn_slot();
         if (!p->ainsn.insn)
                 return -ENOMEM;
-        arch_copy_kprobe(p);
-        return 0;
+
+        return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
--
cgit v0.10.2
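The patch above boils down to one error-propagation pattern: __copy_instruction() reports failure by returning a copied length of 0, arch_copy_kprobe() turns that into -EINVAL instead of silently arming a probe on a garbage slot, and arch_prepare_kprobe() passes the result up. Here is a minimal, self-contained userspace sketch of that pattern; the names copy_insn, arch_copy and prepare are illustrative stand-ins, not the kernel functions:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for __copy_instruction(): returns the copied length, 0 on failure. */
static int copy_insn(unsigned char *dest, const unsigned char *src, int len)
{
        if (len <= 0)
                return 0;       /* decode/copy failure: nothing was copied */
        memcpy(dest, src, len);
        return len;
}

/* Stand-in for arch_copy_kprobe(): convert a zero length into an error code. */
static int arch_copy(unsigned char *slot, const unsigned char *addr, int len)
{
        int ret = copy_insn(slot, addr, len);

        if (!ret)
                return -EINVAL; /* refuse to arm a probe on a garbage slot */
        return 0;
}

/* Stand-in for arch_prepare_kprobe(): propagate instead of always returning 0. */
static int prepare(unsigned char *slot, const unsigned char *addr, int len)
{
        return arch_copy(slot, addr, len);
}

int main(void)
{
        unsigned char slot[16];
        unsigned char insn[1] = { 0x90 };       /* NOP */

        printf("good insn: %d\n", prepare(slot, insn, 1));     /* prints 0 */
        printf("bad insn:  %d\n", prepare(slot, insn, 0));     /* prints -22 (-EINVAL) */
        return 0;
}

Before the fix, the prepare() analogue always returned 0, so a probe could later single-step whatever stale bytes happened to be in the slot, which is the failure mode the commit message describes.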
From 8b4d801b2b123b6c09742f861fe44a8527b84d47 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 20 Jun 2013 17:50:06 +0200
Subject: hw_breakpoint: Fix cpu check in task_bp_pinned(cpu)

The trinity fuzzer triggered WARN_ONCE("Can't find any breakpoint slot")
in arch_install_hw_breakpoint(), but the problem is not arch-specific.

The problem is that task_bp_pinned(cpu) checks "cpu == iter->cpu", which
does not account for the "all cpus" events with iter->cpu < 0. This means
that, say, register_user_hw_breakpoint(tsk) can happily create an
arbitrary number (> HBP_NUM) of breakpoints which can not be activated.

toggle_bp_task_slot() is equally wrong for the same reason, and
nr_task_bp_pinned[] can have negative entries.

Simple test:

        # perl -e 'sleep 1 while 1' &
        # perf record -e mem:0x10,mem:0x10,mem:0x10,mem:0x10,mem:0x10 -p `pidof perl`

Before this patch the test triggers the same problem/WARN_ON(); after the
patch it correctly fails with -ENOSPC.

Reported-by: Vince Weaver
Signed-off-by: Oleg Nesterov
Acked-by: Frederic Weisbecker
Cc:
Link: http://lkml.kernel.org/r/20130620155006.GA6324@redhat.com
Signed-off-by: Ingo Molnar

diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index a64f8ae..a853dea 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
         list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                 if (iter->hw.bp_target == tsk &&
                     find_slot_idx(iter) == type &&
-                    cpu == iter->cpu)
+                    (iter->cpu < 0 || cpu == iter->cpu))
                         count += hw_breakpoint_weight(iter);
         }
 
--
cgit v0.10.2

From c790b0ad23f427c7522ffed264706238c57c007e Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 20 Jun 2013 17:50:09 +0200
Subject: hw_breakpoint: Use cpu_possible_mask in {reserve,release}_bp_slot()

fetch_bp_busy_slots() and toggle_bp_slot() use for_each_online_cpu(),
which is obviously wrong wrt cpu_up() or cpu_down(): we can over- or
under-account the per-cpu numbers.

For example:

        # echo 0 >> /sys/devices/system/cpu/cpu1/online
        # perf record -e mem:0x10 -p 1 &
        # echo 1 >> /sys/devices/system/cpu/cpu1/online
        # perf record -e mem:0x10,mem:0x10,mem:0x10,mem:0x10 -C1 -a &
        # taskset -p 0x2 1

triggers the same WARN_ONCE("Can't find any breakpoint slot") in
arch_install_hw_breakpoint().

Reported-by: Vince Weaver
Signed-off-by: Oleg Nesterov
Acked-by: Frederic Weisbecker
Cc:
Link: http://lkml.kernel.org/r/20130620155009.GA6327@redhat.com
Signed-off-by: Ingo Molnar

diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index a853dea..20185ea 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                 return;
         }
 
-        for_each_online_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 unsigned int nr;
 
                 nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
         if (cpu >= 0) {
                 toggle_bp_task_slot(bp, cpu, enable, type, weight);
         } else {
-                for_each_online_cpu(cpu)
+                for_each_possible_cpu(cpu)
                         toggle_bp_task_slot(bp, cpu, enable, type, weight);
         }
 
--
cgit v0.10.2
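Both hw_breakpoint fixes come down to the same accounting rule: a per-task breakpoint created with cpu == -1 consumes a slot on every CPU, and the per-CPU walk has to cover every possible CPU, not only the currently online ones. The following self-contained userspace sketch shows only the counting side of that rule; struct bp_event, the task_bp_pinned() stand-in and the constants are illustrative, not the kernel's data structures:

#include <stdio.h>

#define HBP_NUM                 4       /* x86: four debug-register slots */
#define NR_POSSIBLE_CPUS        4       /* stand-in for cpu_possible_mask */

struct bp_event {
        int cpu;                        /* -1 means "all CPUs" */
};

/* Count how many pinned slots the task's breakpoints consume on @cpu. */
static int task_bp_pinned(int cpu, const struct bp_event *events, int nr)
{
        int i, count = 0;

        for (i = 0; i < nr; i++) {
                /* The buggy check was just "cpu == events[i].cpu", which
                 * skipped the cpu == -1 ("all CPUs") events entirely. */
                if (events[i].cpu < 0 || cpu == events[i].cpu)
                        count++;
        }
        return count;
}

int main(void)
{
        /* Five per-task breakpoints created with cpu == -1, as in the
         * "perf record -e mem:0x10,... -p `pidof perl`" reproducer. */
        struct bp_event events[] = { { -1 }, { -1 }, { -1 }, { -1 }, { -1 } };
        int cpu;

        for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++) {
                int pinned = task_bp_pinned(cpu, events, 5);

                printf("cpu%d: %d pinned%s\n", cpu, pinned,
                       pinned > HBP_NUM ? " -> no free slot, reservation must fail" : "");
        }
        return 0;
}

With the corrected check, every CPU is seen to exceed HBP_NUM once the fifth breakpoint is requested, which is why the reproducer now fails with -ENOSPC instead of tripping the WARN_ONCE() at install time; iterating possible rather than online CPUs keeps that accounting consistent across cpu_up()/cpu_down().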