author     Rusty Lynch <rusty.lynch@intel.com>        2005-06-23 07:09:30 (GMT)
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-23 16:45:23 (GMT)
commit     8bc76772ad653bcaad1b0af72aafb6072ef0fa87 (patch)
tree       d778f0cc2640e078338d84cdc67cda403cc47d7b /arch/ia64
parent     cd2675bf65455a45b54228b7acc0c6a26a164cb6 (diff)
[PATCH] Kprobes ia64 cleanup
A cleanup of the ia64 kprobes implementation such that all of the bundle
manipulation logic is concentrated in arch_prepare_kprobe().
With the current design for kprobes, the arch-specific code only has a
chance to return failure inside the arch_prepare_kprobe() function.
This patch moves all of the work that was happening in arch_copy_kprobe()
and most of the work that was happening in arch_arm_kprobe() into
arch_prepare_kprobe(). By doing this we can add further robustness checks
in arch_prepare_kprobe() and refuse to insert kprobes that will cause problems.
Signed-off-by: Rusty Lynch <Rusty.lynch@intel.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
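
For context, a simplified sketch of the registration flow this cleanup is built
around (illustrative only, not the kernel's actual register_kprobe() source): the
generic kprobes core checks the return value of arch_prepare_kprobe() and of no
other arch hook, so that is the only place a probe can be refused, while
arch_arm_kprobe() returns void and must not fail.

    /*
     * Illustrative sketch only -- not verbatim kernel code.  The point is
     * that arch_prepare_kprobe() is the only arch hook whose return value
     * the generic code checks at registration time.
     */
    int register_kprobe(struct kprobe *p)
    {
            int ret;

            ret = arch_prepare_kprobe(p);   /* all bundle validation/copying now lives here */
            if (ret)
                    return ret;             /* e.g. -EINVAL for an invalid slot */

            /* ...add p to the kprobe hash table... */

            arch_arm_kprobe(p);     /* void: patches in the break bundle and flushes icache */
            return 0;
    }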
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/kernel/kprobes.c   178
1 file changed, 77 insertions, 101 deletions
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 20a250e..b7a2041 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -84,121 +84,97 @@ static enum instruction_type bundle_encoding[32][3] = {
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
-	unsigned long bundle_addr = addr & ~0xFULL;
+	unsigned long *bundle_addr = (unsigned long *)(addr & ~0xFULL);
 	unsigned long slot = addr & 0xf;
-	bundle_t bundle;
 	unsigned long template;
+	unsigned long major_opcode = 0;
+	unsigned long lx_type_inst = 0;
+	unsigned long kprobe_inst = 0;
+	bundle_t *bundle = &p->ainsn.insn.bundle;
 
-	/*
-	 * TODO: Verify that a probe is not being inserted
-	 *       in sensitive regions of code
-	 * TODO: Verify that the memory holding the probe is rwx
-	 * TODO: verify this is a kernel address
-	 */
-	memcpy(&bundle, (unsigned long *)bundle_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
-	if (((bundle_encoding[template][1] == L) && slot > 1) || (slot > 2)) {
-		printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n", addr);
-		return -EINVAL;
-	}
-	return 0;
-}
+	memcpy(&p->opcode.bundle, bundle_addr, sizeof(bundle_t));
+	memcpy(&p->ainsn.insn.bundle, bundle_addr, sizeof(bundle_t));
 
-void arch_copy_kprobe(struct kprobe *p)
-{
-	unsigned long addr = (unsigned long)p->addr;
-	unsigned long bundle_addr = addr & ~0xFULL;
+	p->ainsn.inst_flag = 0;
+	p->ainsn.target_br_reg = 0;
 
-	memcpy(&p->ainsn.insn.bundle, (unsigned long *)bundle_addr,
-			sizeof(bundle_t));
-	memcpy(&p->opcode.bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
-}
+	template = bundle->quad0.template;
 
-void arch_arm_kprobe(struct kprobe *p)
-{
-	unsigned long addr = (unsigned long)p->addr;
-	unsigned long arm_addr = addr & ~0xFULL;
-	unsigned long slot = addr & 0xf;
-	unsigned long template;
-	unsigned long major_opcode = 0;
-	unsigned long lx_type_inst = 0;
-	unsigned long kprobe_inst = 0;
-	bundle_t bundle;
-
-	p->ainsn.inst_flag = 0;
-	p->ainsn.target_br_reg = 0;
-
-	memcpy(&bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
-	template = bundle.quad0.template;
-	if (slot == 1 && bundle_encoding[template][1] == L) {
-		lx_type_inst = 1;
-		slot = 2;
-	}
+	if (((bundle_encoding[template][1] == L) && slot > 1) || (slot > 2)) {
+		printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
+				addr);
+		return -EINVAL;
+	}
+
+	if (slot == 1 && bundle_encoding[template][1] == L) {
+		lx_type_inst = 1;
+		slot = 2;
+	}
 
 	switch (slot) {
 	case 0:
-		major_opcode = (bundle.quad0.slot0 >> SLOT0_OPCODE_SHIFT);
-		kprobe_inst = bundle.quad0.slot0;
-		bundle.quad0.slot0 = BREAK_INST;
+		major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+		kprobe_inst = bundle->quad0.slot0;
+		bundle->quad0.slot0 = BREAK_INST;
 		break;
 	case 1:
-		major_opcode = (bundle.quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
-		kprobe_inst = (bundle.quad0.slot1_p0 |
-				(bundle.quad1.slot1_p1 << (64-46)));
-		bundle.quad0.slot1_p0 = BREAK_INST;
-		bundle.quad1.slot1_p1 = (BREAK_INST >> (64-46));
+		major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+		kprobe_inst = (bundle->quad0.slot1_p0 |
+				(bundle->quad1.slot1_p1 << (64-46)));
+		bundle->quad0.slot1_p0 = BREAK_INST;
+		bundle->quad1.slot1_p1 = (BREAK_INST >> (64-46));
 		break;
 	case 2:
-		major_opcode = (bundle.quad1.slot2 >> SLOT2_OPCODE_SHIFT);
-		kprobe_inst = bundle.quad1.slot2;
-		bundle.quad1.slot2 = BREAK_INST;
+		major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+		kprobe_inst = bundle->quad1.slot2;
+		bundle->quad1.slot2 = BREAK_INST;
 		break;
 	}
-	/*
-	 * Look for IP relative Branches, IP relative call or
-	 * IP relative predicate instructions
-	 */
-	if (bundle_encoding[template][slot] == B) {
-		switch (major_opcode) {
-		case INDIRECT_CALL_OPCODE:
-			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
-			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
-			break;
-		case IP_RELATIVE_PREDICT_OPCODE:
-		case IP_RELATIVE_BRANCH_OPCODE:
-			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
-			break;
-		case IP_RELATIVE_CALL_OPCODE:
-			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
-			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
-			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
-			break;
-		default:
-			/* Do nothing */
-			break;
-		}
-	} else if (lx_type_inst) {
-		switch (major_opcode) {
-		case LONG_CALL_OPCODE:
-			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
-			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
-			break;
-		default:
-			/* Do nothing */
-			break;
-		}
-	}
-
-	/* Flush icache for the instruction at the emulated address */
-	flush_icache_range((unsigned long)&p->ainsn.insn.bundle,
-			(unsigned long)&p->ainsn.insn.bundle +
-			sizeof(bundle_t));
-	/*
-	 * Patch the original instruction with the probe instruction
-	 * and flush the instruction cache
-	 */
-	memcpy((char *) arm_addr, (char *) &bundle, sizeof(bundle_t));
+
+	/*
+	 * Look for IP relative Branches, IP relative call or
+	 * IP relative predicate instructions
+	 */
+	if (bundle_encoding[template][slot] == B) {
+		switch (major_opcode) {
+		case INDIRECT_CALL_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
+		case IP_RELATIVE_PREDICT_OPCODE:
+		case IP_RELATIVE_BRANCH_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+			break;
+		case IP_RELATIVE_CALL_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	} else if (lx_type_inst) {
+		switch (major_opcode) {
+		case LONG_CALL_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+	return 0;
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr;
+	unsigned long arm_addr = addr & ~0xFULL;
+
+	memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
 	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
 
@@ -226,7 +202,7 @@ void arch_remove_kprobe(struct kprobe *p)
  */
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle_addr = ((unsigned long) (&p->ainsn.insn.bundle)) & ~0xFULL;
+	unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
 	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
 	unsigned long template;
 	int slot = ((unsigned long)p->addr & 0xf);
@@ -293,7 +269,7 @@ turn_ss_off:
 
 static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn.bundle;
+	unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
 	unsigned long slot = (unsigned long)p->addr & 0xf;
 
 	/* Update instruction pointer (IIP) and slot number (IPSR.ri) */
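
Most of the slot bookkeeping above follows from the IA-64 bundle format: 128 bits
holding a 5-bit template and three 41-bit instruction slots, with slot 1 straddling
the two 64-bit words. The sketch below is a toy model to help read the diff; the
names toy_bundle and toy_slot are made up for illustration, and the kernel's real
bundle_t uses bitfields (quad0.slot1_p0, quad1.slot1_p1, ...) rather than shifts.

    #include <stdint.h>

    /*
     * Toy model of an IA-64 instruction bundle (illustrative only):
     *   bits   0..4    template
     *   bits   5..45   slot 0
     *   bits  46..86   slot 1  (18 bits in quad0, 23 bits in quad1)
     *   bits  87..127  slot 2
     */
    struct toy_bundle {
            uint64_t quad0;
            uint64_t quad1;
    };

    /* Extract the 41-bit instruction in slot 0, 1 or 2. */
    static uint64_t toy_slot(const struct toy_bundle *b, int slot)
    {
            const uint64_t mask41 = (1ULL << 41) - 1;

            switch (slot) {
            case 0:
                    return (b->quad0 >> 5) & mask41;
            case 1:
                    /* Same reassembly as kprobe_inst = slot1_p0 | (slot1_p1 << (64-46)). */
                    return ((b->quad0 >> 46) | (b->quad1 << 18)) & mask41;
            default:
                    return (b->quad1 >> 23) & mask41;
            }
    }

This is also why only slot 1 needs the paired slot1_p0/slot1_p1 reads and writes
when the break instruction is patched in, and why an L+X long instruction that
nominally starts in slot 1 is handled as slot 2 above: its major opcode lives in
the slot 2 bits.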