-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 37
 1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5d3ea9f60dd7..ca5d5898d320 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
         hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+        unsigned long flags;
+        unsigned int *depth;
+
+        local_irq_save(flags);
+
+        depth = &__get_cpu_var(hcall_trace_depth);
+
+        if (*depth)
+                goto out;
+
+        (*depth)++;
         trace_hcall_entry(opcode, args);
+        (*depth)--;
+
+out:
+        local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
                 unsigned long *retbuf)
 {
+        unsigned long flags;
+        unsigned int *depth;
+
+        local_irq_save(flags);
+
+        depth = &__get_cpu_var(hcall_trace_depth);
+
+        if (*depth)
+                goto out;
+
+        (*depth)++;
         trace_hcall_exit(opcode, retval, retbuf);
+        (*depth)--;
+
+out:
+        local_irq_restore(flags);
 }
 #endif
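
The change above is a re-entrancy latch: each CPU keeps a hcall_trace_depth counter, and the trace hooks bail out if the counter is already raised, so a tracer that itself issues hcalls (for example a spinlock calling H_YIELD on a shared processor partition) does not recurse back into the tracing code. Interrupts are disabled around the check so the per-CPU counter cannot be disturbed by an interrupt on the same CPU. The fragment below is a minimal userspace sketch of the same guard pattern, not kernel code: it swaps DEFINE_PER_CPU/__get_cpu_var and local_irq_save()/restore() for a C11 _Thread_local counter, and trace_event()/issue_hcall() are made-up names used only for illustration.

/* Standalone illustration of the recursion guard (assumptions noted above). */
#include <stdio.h>

static _Thread_local unsigned int trace_depth;  /* analogue of hcall_trace_depth */

static void issue_hcall(int opcode);            /* hypothetical hcall entry point */

/*
 * The "tracepoint": like the real tracing code, it may itself end up
 * issuing an hcall, which is exactly the recursion being guarded against.
 */
static void trace_event(int opcode)
{
        printf("traced hcall %d\n", opcode);
        if (opcode == 42)
                issue_hcall(7);         /* tracer re-enters the hcall path */
}

static void issue_hcall(int opcode)
{
        if (trace_depth)        /* already inside the tracer on this thread: skip tracing */
                goto out;

        trace_depth++;
        trace_event(opcode);
        trace_depth--;
out:
        ;       /* ...perform the actual hypercall here... */
}

int main(void)
{
        issue_hcall(42);        /* traced once; the nested call from the tracer is not traced */
        return 0;
}

Built with any C11 compiler, this prints a single "traced hcall 42" line: the nested issue_hcall(7) made from inside the tracer skips tracing because the depth counter is still non-zero, which is the behaviour the patch establishes per CPU for the hcall tracepoints.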
