-rw-r--r--	arch/i386/kernel/kprobes.c	34
-rw-r--r--	arch/ia64/kernel/kprobes.c	89
-rw-r--r--	arch/powerpc/kernel/kprobes.c	26
-rw-r--r--	arch/x86_64/kernel/kprobes.c	41
-rw-r--r--	kernel/kprobes.c	16
5 files changed, 103 insertions(+), 103 deletions(-)
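For context, the arch_prepare_kretprobe() and trampoline handler code touched below is driven by the generic kretprobe API in kernel/kprobes.c. A minimal sketch of a module that exercises that path follows; it is illustrative only and not part of the patch — the probed symbol ("do_fork") and the use of kallsyms_lookup_name() are assumptions that depend on kernel version and configuration.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

/* Called on return from the probed function, via kretprobe_trampoline. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	/* Up to 20 concurrent instances; misses show up in nmissed. */
	.maxactive	= 20,
};

static int __init kretprobe_example_init(void)
{
	/* "do_fork" is only an example target. */
	my_kretprobe.kp.addr =
		(kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!my_kretprobe.kp.addr)
		return -ENOENT;
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	printk(KERN_INFO "missed %d instances\n", my_kretprobe.nmissed);
}

module_init(kretprobe_example_init);
module_exit(kretprobe_example_exit);
MODULE_LICENSE("GPL");

When all maxactive instances are in flight, get_free_rp_inst() fails and the arch code below bumps rp->nmissed, which is why the exit routine above reports it.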
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index afe6505ca0b3..7a97544f15a0 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -230,20 +230,20 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)&regs->esp;
+
 	struct kretprobe_instance *ri;
 
 	if ((ri = get_free_rp_inst(rp)) != NULL) {
 		ri->rp = rp;
 		ri->task = current;
 		ri->ret_addr = (kprobe_opcode_t *) *sara;
 
 		/* Replace the return addr with trampoline addr */
 		*sara = (unsigned long) &kretprobe_trampoline;
-
 		add_rp_inst(ri);
 	} else {
 		rp->nmissed++;
 	}
 }
 
 /*
@@ -359,7 +359,7 @@ no_kprobe:
 void __kprobes kretprobe_trampoline_holder(void)
 {
 	asm volatile ( ".global kretprobe_trampoline\n"
 			"kretprobe_trampoline: \n"
 			"	pushf\n"
 			/* skip cs, eip, orig_eax, es, ds */
 			"	subl $20, %esp\n"
@@ -395,14 +395,14 @@ no_kprobe:
  */
 fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -413,14 +413,14 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
 	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
 
 		if (ri->rp && ri->rp->handler){
 			__get_cpu_var(current_kprobe) = &ri->rp->kp;
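The trampoline_handler() above hands each registered handler the saved pt_regs, so a handler can inspect the probed function's return value. A sketch, not part of the patch: on i386 an integer return value sits in regs->eax at this point, and the register differs on other architectures.

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On i386, regs->eax holds the probed function's integer return value. */
	printk(KERN_INFO "probed function returned %ld\n", regs->eax);
	return 0;
}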
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 169ec3a7156c..9c9c8fcdfbdc 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -90,7 +90,7 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 	p->ainsn.target_br_reg = 0;
 
 	/* Check for Break instruction
 	 * Bits 37:40 Major opcode to be zero
 	 * Bits 27:32 X6 to be zero
 	 * Bits 32:35 X3 to be zero
 	 */
@@ -104,19 +104,19 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 		switch (major_opcode) {
 		case INDIRECT_CALL_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
 			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
 			break;
 		case IP_RELATIVE_PREDICT_OPCODE:
 		case IP_RELATIVE_BRANCH_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
 			break;
 		case IP_RELATIVE_CALL_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
 			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
 			break;
 		}
 	} else if (bundle_encoding[template][slot] == X) {
 		switch (major_opcode) {
 		case LONG_CALL_OPCODE:
 			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
@@ -258,18 +258,18 @@ static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
 
 	switch (slot) {
 	case 0:
 		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
 		*kprobe_inst = bundle->quad0.slot0;
 		break;
 	case 1:
 		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
 		kprobe_inst_p0 = bundle->quad0.slot1_p0;
 		kprobe_inst_p1 = bundle->quad1.slot1_p1;
 		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
 		break;
 	case 2:
 		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
 		*kprobe_inst = bundle->quad1.slot2;
 		break;
 	}
 }
@@ -290,11 +290,11 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
 		return -EINVAL;
 	}
 
 	if (in_ivt_functions(addr)) {
 		printk(KERN_WARNING "Kprobes can't be inserted inside "
 				"IVT functions at 0x%lx\n", addr);
 		return -EINVAL;
 	}
 
 	if (slot == 1 && bundle_encoding[template][1] != L) {
 		printk(KERN_WARNING "Inserting kprobes on slot #1 "
@@ -424,14 +424,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	bundle_t *bundle;
 
 	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
 	template = bundle->quad0.template;
 
 	if(valid_kprobe_addr(template, slot, addr))
 		return -EINVAL;
 
 	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot++;
 
 	/* Get kprobe_inst and major_opcode from the bundle */
 	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
@@ -489,21 +489,22 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
  */
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
 	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
 	unsigned long template;
 	int slot = ((unsigned long)p->addr & 0xf);
 
 	template = p->ainsn.insn->bundle.quad0.template;
 
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
 
 	if (p->ainsn.inst_flag) {
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
-			regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
+			regs->cr_iip = (regs->cr_iip - bundle_addr) +
+					resume_addr;
 		}
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
@@ -540,18 +541,18 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	}
 
 	if (slot == 2) {
 		if (regs->cr_iip == bundle_addr + 0x10) {
 			regs->cr_iip = resume_addr + 0x10;
 		}
 	} else {
 		if (regs->cr_iip == bundle_addr) {
 			regs->cr_iip = resume_addr;
 		}
 	}
 
 turn_ss_off:
 	/* Turn off Single Step bit */
 	ia64_psr(regs)->ss = 0;
 }
 
 static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
@@ -587,7 +588,7 @@ static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 
 	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot++;
 
 	/* Get Kprobe probe instruction at given slot*/
 	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
@@ -627,7 +628,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	if (p) {
 		if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
 			(p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
 			ia64_psr(regs)->ss = 0;
 			goto no_kprobe;
 		}
 		/* We have reentered the pre_kprobe_handler(), since
@@ -887,7 +888,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 * fix the return address to our jprobe_inst_return() function
 	 * in the jprobes.S file
 	 */
 	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
 
 	return 1;
 }
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index cd65c367b8b6..46d2fd0e5789 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -259,14 +259,14 @@ void kretprobe_trampoline_holder(void)
  */
 int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -277,14 +277,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
 	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
 
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
@@ -308,12 +308,12 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling
 	 * kprobe_handler() that we don't want the post_handler
 	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
 
 /*
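The powerpc trampoline_probe_handler() above passes the same saved pt_regs to ri->rp->handler(), so the equivalent of the earlier i386 sketch reads the return value from r3 instead. Again an illustrative fragment, not part of the patch:

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On powerpc the probed function's return value is in r3. */
	printk(KERN_INFO "probed function returned %lu\n", regs->gpr[3]);
	return 0;
}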
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index ffc73ac72485..d04f0ab2ff40 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -270,20 +270,19 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)regs->rsp;
 	struct kretprobe_instance *ri;
 
 	if ((ri = get_free_rp_inst(rp)) != NULL) {
 		ri->rp = rp;
 		ri->task = current;
 		ri->ret_addr = (kprobe_opcode_t *) *sara;
 
 		/* Replace the return addr with trampoline addr */
 		*sara = (unsigned long) &kretprobe_trampoline;
-
 		add_rp_inst(ri);
 	} else {
 		rp->nmissed++;
 	}
 }
 
 int __kprobes kprobe_handler(struct pt_regs *regs)
@@ -405,14 +404,14 @@ no_kprobe:
  */
 int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -423,14 +422,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
 	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
 
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
@@ -454,12 +453,12 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling
 	 * kprobe_handler() that we don't want the post_handler
 	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
 
 /*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f66b8e681b4d..41dfda50e22a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -347,17 +347,17 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
  */
 void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
 
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(tk);
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri);
 	}
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
@@ -514,7 +514,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 				(ARCH_INACTIVE_KPROBE_COUNT + 1))
 		register_page_fault_notifier(&kprobe_page_fault_nb);
 
 	arch_arm_kprobe(p);
 
 out:
 	mutex_unlock(&kprobe_mutex);
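The final hunk is in the generic registration path: __register_kprobe() finishes by calling arch_arm_kprobe() to patch the breakpoint into the probed text. A minimal sketch of a caller of that path, including the post_handler that the trampoline comments above refer to; the target symbol ("do_fork") and the use of kallsyms_lookup_name() are illustrative assumptions, not part of the patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "pre_handler: hit at %p\n", p->addr);
	return 0;	/* let the probed instruction be single-stepped */
}

static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	printk(KERN_INFO "post_handler: single-step done at %p\n", p->addr);
}

static struct kprobe kp = {
	.pre_handler	= handler_pre,
	.post_handler	= handler_post,
};

static int __init kprobe_example_init(void)
{
	/* "do_fork" is only an example target. */
	kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!kp.addr)
		return -ENOENT;
	return register_kprobe(&kp);	/* ends up in arch_arm_kprobe() */
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");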