author     Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>   2005-06-23 03:09:29 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>               2005-06-23 12:45:23 -0400
commit     cd2675bf65455a45b54228b7acc0c6a26a164cb6 (patch)
tree       102dd9bdf79ce8a5728dd098fb2b3a574c113186 /arch/ia64
parent     b2761dc262b428475890fffd979687051beb12ba (diff)
[PATCH] Kprobes/IA64: support kprobe on branch/call instructions
This patch is required to support kprobe on branch/call instructions.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/kernel/kprobes.c   131
1 file changed, 116 insertions, 15 deletions
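For context on the arming logic in the diff that follows: an ia64 kprobe address packs a 16-byte-aligned bundle address and a slot number into its low bits, and the patch classifies the probed slot by template and major opcode to decide which fixups resume_execution() must later apply. A minimal standalone sketch of the address decomposition, not part of the patch (the address value below is made up):

/* Standalone illustration only; the probe address is hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0xa000000100061f41UL;	/* hypothetical probe address */
	unsigned long bundle = addr & ~0xFULL;		/* 16-byte aligned bundle address */
	unsigned long slot = addr & 0xf;		/* instruction slot 0, 1 or 2 */

	printf("bundle = %#lx, slot = %lu\n", bundle, slot);
	return 0;
}

The same two masks appear at the top of arch_arm_kprobe() in the first hunk below.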
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 6683a44f419f..20a250e2e9b2 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -120,25 +120,75 @@ void arch_arm_kprobe(struct kprobe *p)
 	unsigned long arm_addr = addr & ~0xFULL;
 	unsigned long slot = addr & 0xf;
 	unsigned long template;
+	unsigned long major_opcode = 0;
+	unsigned long lx_type_inst = 0;
+	unsigned long kprobe_inst = 0;
 	bundle_t bundle;
 
+	p->ainsn.inst_flag = 0;
+	p->ainsn.target_br_reg = 0;
+
 	memcpy(&bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
+	template = bundle.quad0.template;
+	if (slot == 1 && bundle_encoding[template][1] == L) {
+		lx_type_inst = 1;
+		slot = 2;
+	}
+
 
-	template = bundle.quad0.template;
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot = 2;
 	switch (slot) {
 	case 0:
+		major_opcode = (bundle.quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+		kprobe_inst = bundle.quad0.slot0;
 		bundle.quad0.slot0 = BREAK_INST;
 		break;
 	case 1:
+		major_opcode = (bundle.quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+		kprobe_inst = (bundle.quad0.slot1_p0 |
+				(bundle.quad1.slot1_p1 << (64-46)));
 		bundle.quad0.slot1_p0 = BREAK_INST;
 		bundle.quad1.slot1_p1 = (BREAK_INST >> (64-46));
 		break;
 	case 2:
+		major_opcode = (bundle.quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+		kprobe_inst = bundle.quad1.slot2;
 		bundle.quad1.slot2 = BREAK_INST;
 		break;
 	}
+	/*
+	 * Look for IP relative Branches, IP relative call or
+	 * IP relative predicate instructions
+	 */
+	if (bundle_encoding[template][slot] == B) {
+		switch (major_opcode) {
+		case INDIRECT_CALL_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
+		case IP_RELATIVE_PREDICT_OPCODE:
+		case IP_RELATIVE_BRANCH_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+			break;
+		case IP_RELATIVE_CALL_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	} else if (lx_type_inst) {
+		switch (major_opcode) {
+		case LONG_CALL_OPCODE:
+			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
 
 	/* Flush icache for the instruction at the emulated address */
 	flush_icache_range((unsigned long)&p->ainsn.insn.bundle,
@@ -170,24 +220,75 @@ void arch_remove_kprobe(struct kprobe *p)
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
  * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
- * the ip to point back to the original stack address, and if we see that
- * the slot has incremented back to zero, then we need to point to the next
- * slot location.
+ * the ip to point back to the original stack address. To set the IP address
+ * to original stack address, handle the case where we need to fixup the
+ * relative IP address and/or fixup branch register.
  */
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle = (unsigned long)p->addr & ~0xFULL;
+	unsigned long bundle_addr = ((unsigned long) (&p->ainsn.insn.bundle)) & ~0xFULL;
+	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned long template;
+	int slot = ((unsigned long)p->addr & 0xf);
 
-	/*
-	 * TODO: Handle cases where kprobe was inserted on a branch instruction
-	 */
+	template = p->opcode.bundle.quad0.template;
+
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot = 2;
+
+	if (p->ainsn.inst_flag) {
 
-	if (!ia64_psr(regs)->ri)
-		regs->cr_iip = bundle + 0x10;
-	else
-		regs->cr_iip = bundle;
+		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
+			/* Fix relative IP address */
+			regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
+		}
+
+		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
+			/*
+			 * Fix target branch register, software convention is
+			 * to use either b0 or b6 or b7, so just checking
+			 * only those registers
+			 */
+			switch (p->ainsn.target_br_reg) {
+			case 0:
+				if ((regs->b0 == bundle_addr) ||
+				    (regs->b0 == bundle_addr + 0x10)) {
+					regs->b0 = (regs->b0 - bundle_addr) +
+						resume_addr;
+				}
+				break;
+			case 6:
+				if ((regs->b6 == bundle_addr) ||
+				    (regs->b6 == bundle_addr + 0x10)) {
+					regs->b6 = (regs->b6 - bundle_addr) +
+						resume_addr;
+				}
+				break;
+			case 7:
+				if ((regs->b7 == bundle_addr) ||
+				    (regs->b7 == bundle_addr + 0x10)) {
+					regs->b7 = (regs->b7 - bundle_addr) +
+						resume_addr;
+				}
+				break;
+			} /* end switch */
+		}
+		goto turn_ss_off;
+	}
 
-	ia64_psr(regs)->ss = 0;
+	if (slot == 2) {
+		if (regs->cr_iip == bundle_addr + 0x10) {
+			regs->cr_iip = resume_addr + 0x10;
+		}
+	} else {
+		if (regs->cr_iip == bundle_addr) {
+			regs->cr_iip = resume_addr;
+		}
+	}
+
+turn_ss_off:
+	/* Turn off Single Step bit */
+	ia64_psr(regs)->ss = 0;
 }
 
 static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
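The key idea in the new resume_execution() above is that the probed bundle is single-stepped out of line from the copy in p->ainsn.insn.bundle, so any IP-relative target, fall-through IP, or return branch register that ends up pointing into the copy has to be rebased onto the original probe address. A small standalone sketch of that arithmetic, not part of the patch (all addresses below are hypothetical):

/*
 * Standalone illustration of the rebasing done by resume_execution();
 * the addresses are made up for the example.
 */
#include <stdio.h>

static unsigned long rebase(unsigned long ip, unsigned long bundle_addr,
			    unsigned long resume_addr)
{
	/* Keep the offset within the copied bundle region, move the base
	 * back to the original probe site. */
	return (ip - bundle_addr) + resume_addr;
}

int main(void)
{
	unsigned long bundle_addr = 0xa000000200100000UL; /* out-of-line copy */
	unsigned long resume_addr = 0xa000000100061f40UL; /* original probe site */
	unsigned long ip = bundle_addr + 0x10;		  /* fell through the copy */

	printf("fixed ip = %#lx\n", rebase(ip, bundle_addr, resume_addr));
	return 0;
}

With these made-up values the fixed IP comes out at resume_addr + 0x10, matching the non-branch fall-through case the function handles after the inst_flag block.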