author	bibo mao <bibo.mao@intel.com>	2006-09-26 14:20:37 -0400
committer	Tony Luck <tony.luck@intel.com>	2006-09-26 14:20:37 -0400
commit	214ddde2f95037e129eff7e895869771719c7c1b (patch)
tree	6879e433635ce84b964d7c78331cd8eda698f35e
parent	a4b47ab9464a8200528fad3101668abdd7379cf9 (diff)
[IA64] kprobe opcode 16 bytes alignment on IA64
On IA64, instruction opcodes must be 16-byte aligned. The kprobe structure
contains a member that saves a copy of the original instruction, but because
that copy is embedded directly in the kprobe structure, its 16-byte alignment
cannot be assured. This patch dynamically allocates the kprobe instruction
opcode slot so that 16-byte alignment is guaranteed.

Signed-off-by: bibo mao <bibo.mao@intel.com>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
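For orientation, the core of the change in arch_prepare_kprobe() boils down to
the following condensed sketch (lifted from the kprobes.c hunks below; the
template/slot validation and the surrounding declarations are elided, and no
API beyond what the patch itself uses is assumed):

	/* Allocate the copy of the original bundle out of line, in a slot
	 * that the generic insn-slot allocator hands back 16-byte aligned,
	 * which is the alignment the commit message requires. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/* p->ainsn.insn keeps the unaltered original bundle (used when
	 * single-stepping and when disarming the probe), while
	 * prepare_break_inst() builds the break-instruction bundle in
	 * p->opcode, which arch_arm_kprobe() copies over the probed
	 * address. */
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

The matching teardown is the new arch_remove_kprobe(), which returns the slot
via free_insn_slot() under kprobe_mutex.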
-rw-r--r--	arch/ia64/kernel/kprobes.c	55
-rw-r--r--	include/asm-ia64/kprobes.h	9
2 files changed, 34 insertions, 30 deletions
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 781960f80b6f..320cb7a91c9d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -136,10 +136,8 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 static int __kprobes unsupported_inst(uint template, uint slot,
 				       uint major_opcode,
 				       unsigned long kprobe_inst,
-				       struct kprobe *p)
+				       unsigned long addr)
 {
-	unsigned long addr = (unsigned long)p->addr;
-
 	if (bundle_encoding[template][slot] == I) {
 		switch (major_opcode) {
 			case 0x0: //I_UNIT_MISC_OPCODE:
@@ -217,7 +215,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
 				  struct kprobe *p)
 {
 	unsigned long break_inst = BREAK_INST;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
+	bundle_t *bundle = &p->opcode.bundle;
 
 	/*
 	 * Copy the original kprobe_inst qualifying predicate(qp)
@@ -423,11 +421,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
 	unsigned long kprobe_inst=0;
 	unsigned int slot = addr & 0xf, template, major_opcode = 0;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
-
-	memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
-	memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+	bundle_t *bundle;
 
+	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
 	template = bundle->quad0.template;
 
 	if(valid_kprobe_addr(template, slot, addr))
@@ -440,20 +436,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	/* Get kprobe_inst and major_opcode from the bundle */
 	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
 
-	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
+	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
 		return -EINVAL;
 
-	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
 
-	return 0;
-}
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn)
+		return -ENOMEM;
+	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
+	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
 
-void __kprobes flush_insn_slot(struct kprobe *p)
-{
-	unsigned long arm_addr;
+	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
 
-	arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
-	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -461,9 +456,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
 
-	flush_insn_slot(p);
-	memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
-	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+	flush_icache_range((unsigned long)p->ainsn.insn,
+			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+	memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
+	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
@@ -471,11 +467,18 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
 
-	/* p->opcode contains the original unaltered bundle */
-	memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
-	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
+	memcpy((char *) arm_addr, (char *) p->ainsn.insn,
+					 sizeof(kprobe_opcode_t));
+	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
@@ -486,12 +489,12 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
  */
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
+	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
 	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
 	unsigned long template;
 	int slot = ((unsigned long)p->addr & 0xf);
 
-	template = p->opcode.bundle.quad0.template;
+	template = p->ainsn.insn->bundle.quad0.template;
 
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
@@ -553,7 +556,7 @@ turn_ss_off:
 
 static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
+	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
 	unsigned long slot = (unsigned long)p->addr & 0xf;
 
 	/* single step inline if break instruction */
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 938904910115..1b45b71c79b9 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -29,7 +29,8 @@
 #include <linux/percpu.h>
 #include <asm/break.h>
 
-#define MAX_INSN_SIZE	16
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE	1
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
 
 typedef union cmp_inst {
@@ -94,7 +95,7 @@ struct kprobe_ctlblk {
 #define IP_RELATIVE_PREDICT_OPCODE	(7)
 #define LONG_BRANCH_OPCODE		(0xC)
 #define LONG_CALL_OPCODE		(0xD)
-#define arch_remove_kprobe(p)		do {} while (0)
+#define flush_insn_slot(p)		do { } while (0)
 
 typedef struct kprobe_opcode {
 	bundle_t bundle;
@@ -108,7 +109,7 @@ struct fnptr {
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
-	kprobe_opcode_t insn;
+	kprobe_opcode_t *insn;
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
 #define INST_FLAG_FIX_BRANCH_REG		2
 #define INST_FLAG_BREAK_INST			4
@@ -125,6 +126,6 @@ static inline void jprobe_return(void)
 }
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
-extern void flush_insn_slot(struct kprobe *p);
+extern void arch_remove_kprobe(struct kprobe *p);
 
 #endif /* _ASM_KPROBES_H */
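
The header hunks above are what make the new allocation path work:
__ARCH_WANT_KPROBES_INSN_SLOT opts ia64 into the generic kprobes insn-slot
allocator, MAX_INSN_SIZE of 1 kprobe_opcode_t makes each slot exactly one
16-byte bundle, and arch_specific_insn now carries a pointer into such a slot
instead of an embedded copy. A purely illustrative check of the alignment
invariant the commit message relies on (not part of the patch) would be:

	/* Illustrative only: the dynamically allocated slot must satisfy the
	 * IA64 requirement that instruction bundles be 16-byte aligned. */
	BUG_ON(((unsigned long) p->ainsn.insn) & 0xf);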