Diffstat (limited to 'arch/powerpc')
23 files changed, 345 insertions, 96 deletions
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 3b1005185390..bf3382f1904d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -46,7 +46,7 @@ config DEBUG_STACK_USAGE
 
 config HCALL_STATS
 	bool "Hypervisor call instrumentation"
-	depends on PPC_PSERIES && DEBUG_FS
+	depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
 	help
 	  Adds code to keep track of the number of hypervisor calls made and
 	  the amount of time spent in hypervisor calls.  Wall time spent in
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index f1889abb89b1..c568329723b8 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -1683,7 +1683,7 @@ CONFIG_HAVE_ARCH_KGDB=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_HCALL_STATS=y
+# CONFIG_HCALL_STATS is not set
 # CONFIG_CODE_PATCHING_SELFTEST is not set
 # CONFIG_FTR_FIXUP_SELFTEST is not set
 # CONFIG_MSI_BITMAP_SELFTEST is not set
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 9154e8526732..f0fb4fc1f6e6 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -19,6 +19,7 @@
 #define _ASM_POWERPC_EMULATED_OPS_H
 
 #include <asm/atomic.h>
+#include <linux/perf_event.h>
 
 
 #ifdef CONFIG_PPC_EMULATED_STATS
@@ -57,7 +58,7 @@ extern u32 ppc_warn_emulated;
 
 extern void ppc_warn_emulated_print(const char *type);
 
-#define PPC_WARN_EMULATED(type)					\
+#define __PPC_WARN_EMULATED(type)				\
 	do {							\
 		atomic_inc(&ppc_emulated.type.val);		\
 		if (ppc_warn_emulated)				\
@@ -66,8 +67,22 @@ extern void ppc_warn_emulated_print(const char *type);
 
 #else /* !CONFIG_PPC_EMULATED_STATS */
 
-#define PPC_WARN_EMULATED(type)	do { } while (0)
+#define __PPC_WARN_EMULATED(type)	do { } while (0)
 
 #endif /* !CONFIG_PPC_EMULATED_STATS */
 
+#define PPC_WARN_EMULATED(type, regs)					\
+	do {								\
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,		\
+			1, 0, regs, 0);					\
+		__PPC_WARN_EMULATED(type);				\
+	} while (0)
+
+#define PPC_WARN_ALIGNMENT(type, regs)					\
+	do {								\
+		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,		\
+			1, 0, regs, regs->dar);				\
+		__PPC_WARN_EMULATED(type);				\
+	} while (0)
+
 #endif /* _ASM_POWERPC_EMULATED_OPS_H */
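
With this change the accounting is split in two layers: __PPC_WARN_EMULATED() keeps the existing per-type emulation counters, while the new PPC_WARN_EMULATED(type, regs) and PPC_WARN_ALIGNMENT(type, regs) wrappers also feed the generic perf software events, using the interrupted register state (and, for alignment, regs->dar) for attribution. An illustrative caller in the style of the align.c and traps.c hunks further down (the function name here is made up) might look like:

	/* Illustrative only; the real call sites are in the align.c and traps.c hunks below. */
	static int handle_alignment_fault(struct pt_regs *regs, unsigned int instr)
	{
		if (instr == DCBZ) {
			/* bumps ppc_emulated.dcbz and fires PERF_COUNT_SW_ALIGNMENT_FAULTS */
			PPC_WARN_ALIGNMENT(dcbz, regs);
			return 0;
		}
		return -EFAULT;
	}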
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 6251a4b10be7..c27caac47ad1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -274,6 +274,8 @@ struct hcall_stats {
 	unsigned long	num_calls;	/* number of calls (on this CPU) */
 	unsigned long	tb_total;	/* total wall time (mftb) of calls. */
 	unsigned long	purr_total;	/* total cpu time (PURR) of calls. */
+	unsigned long	tb_start;
+	unsigned long	purr_start;
 };
 #define HCALL_STAT_ARRAY_SIZE	((MAX_HCALL_OPCODE >> 2) + 1)
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 6315edc205d8..bc8dd53f718a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -489,6 +489,8 @@
 #define SPRN_MMCR1	798
 #define SPRN_MMCRA	0x312
 #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
+#define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+#define   MMCRA_SDAR_ERAT_MISS   0x20000000UL
 #define   MMCRA_SIHV	0x10000000UL /* state of MSR HV when SIAR set */
 #define   MMCRA_SIPR	0x08000000UL /* state of MSR PR when SIAR set */
 #define   MMCRA_SLOT	0x07000000UL /* SLOT bits (37-39) */
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
new file mode 100644
index 000000000000..cbe2297d68b6
--- /dev/null
+++ b/arch/powerpc/include/asm/trace.h
@@ -0,0 +1,133 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM powerpc
+
+#if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWERPC_H
+
+#include <linux/tracepoint.h>
+
+struct pt_regs;
+
+TRACE_EVENT(irq_entry,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs),
+
+	TP_STRUCT__entry(
+		__field(struct pt_regs *, regs)
+	),
+
+	TP_fast_assign(
+		__entry->regs = regs;
+	),
+
+	TP_printk("pt_regs=%p", __entry->regs)
+);
+
+TRACE_EVENT(irq_exit,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs),
+
+	TP_STRUCT__entry(
+		__field(struct pt_regs *, regs)
+	),
+
+	TP_fast_assign(
+		__entry->regs = regs;
+	),
+
+	TP_printk("pt_regs=%p", __entry->regs)
+);
+
+TRACE_EVENT(timer_interrupt_entry,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs),
+
+	TP_STRUCT__entry(
+		__field(struct pt_regs *, regs)
+	),
+
+	TP_fast_assign(
+		__entry->regs = regs;
+	),
+
+	TP_printk("pt_regs=%p", __entry->regs)
+);
+
+TRACE_EVENT(timer_interrupt_exit,
+
+	TP_PROTO(struct pt_regs *regs),
+
+	TP_ARGS(regs),
+
+	TP_STRUCT__entry(
+		__field(struct pt_regs *, regs)
+	),
+
+	TP_fast_assign(
+		__entry->regs = regs;
+	),
+
+	TP_printk("pt_regs=%p", __entry->regs)
+);
+
+#ifdef CONFIG_PPC_PSERIES
+extern void hcall_tracepoint_regfunc(void);
+extern void hcall_tracepoint_unregfunc(void);
+
+TRACE_EVENT_FN(hcall_entry,
+
+	TP_PROTO(unsigned long opcode, unsigned long *args),
+
+	TP_ARGS(opcode, args),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, opcode)
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+	),
+
+	TP_printk("opcode=%lu", __entry->opcode),
+
+	hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
+);
+
+TRACE_EVENT_FN(hcall_exit,
+
+	TP_PROTO(unsigned long opcode, unsigned long retval,
+		unsigned long *retbuf),
+
+	TP_ARGS(opcode, retval, retbuf),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, opcode)
+		__field(unsigned long, retval)
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->retval = retval;
+	),
+
+	TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval),
+
+	hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
+);
+#endif
+
+#endif /* _TRACE_POWERPC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
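
Each TRACE_EVENT() above generates a trace_<name>() call that the irq.c and time.c hunks below invoke, plus the usual register/unregister helpers. TRACE_EVENT_FN() additionally wires in hcall_tracepoint_regfunc()/hcall_tracepoint_unregfunc(), which run when the first probe is attached and the last one removed (see the lpar.c hunk at the end). A minimal sketch of a kernel-side probe attaching to the hcall_entry tracepoint, mirroring what hvCall_inst.c does later in this patch (the probe and function names here are made up), could look like:

	#include <linux/kernel.h>
	#include <asm/trace.h>

	static void my_hcall_entry_probe(unsigned long opcode, unsigned long *args)
	{
		/* runs on every hypervisor call while the probe is registered */
		pr_debug("hcall entry, opcode=%lu\n", opcode);
	}

	static int __init my_probe_init(void)
	{
		/* registration bumps hcall_tracepoint_refcount via hcall_tracepoint_regfunc() */
		return register_trace_hcall_entry(my_hcall_entry_probe);
	}

	static void my_probe_remove(void)
	{
		unregister_trace_hcall_entry(my_hcall_entry_probe);
	}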
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a5b632e52fae..3839839f83c7 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -732,7 +732,7 @@ int fix_alignment(struct pt_regs *regs)
 
 #ifdef CONFIG_SPE
 	if ((instr >> 26) == 0x4) {
-		PPC_WARN_EMULATED(spe);
+		PPC_WARN_ALIGNMENT(spe, regs);
 		return emulate_spe(regs, reg, instr);
 	}
 #endif
@@ -786,7 +786,7 @@ int fix_alignment(struct pt_regs *regs)
 			flags |= SPLT;
 			nb = 8;
 		}
-		PPC_WARN_EMULATED(vsx);
+		PPC_WARN_ALIGNMENT(vsx, regs);
 		return emulate_vsx(addr, reg, areg, regs, flags, nb);
 	}
 #endif
@@ -794,7 +794,7 @@ int fix_alignment(struct pt_regs *regs)
 	 * the exception of DCBZ which is handled as a special case here
 	 */
 	if (instr == DCBZ) {
-		PPC_WARN_EMULATED(dcbz);
+		PPC_WARN_ALIGNMENT(dcbz, regs);
 		return emulate_dcbz(regs, addr);
 	}
 	if (unlikely(nb == 0))
@@ -804,7 +804,7 @@ int fix_alignment(struct pt_regs *regs)
 	 * function
 	 */
 	if (flags & M) {
-		PPC_WARN_EMULATED(multiple);
+		PPC_WARN_ALIGNMENT(multiple, regs);
 		return emulate_multiple(regs, addr, reg, nb,
 					flags, instr, swiz);
 	}
@@ -825,11 +825,11 @@ int fix_alignment(struct pt_regs *regs)
 
 	/* Special case for 16-byte FP loads and stores */
 	if (nb == 16) {
-		PPC_WARN_EMULATED(fp_pair);
+		PPC_WARN_ALIGNMENT(fp_pair, regs);
 		return emulate_fp_pair(addr, reg, flags);
 	}
 
-	PPC_WARN_EMULATED(unaligned);
+	PPC_WARN_ALIGNMENT(unaligned, regs);
 
 	/* If we are loading, get the data from user space, else
 	 * get it from register values
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9763267e38b4..bdcb557d470a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -551,7 +551,7 @@ restore:
 BEGIN_FW_FTR_SECTION
 	ld	r5,SOFTE(r1)
 FW_FTR_SECTION_ELSE
-	b	iseries_check_pending_irqs
+	b	.Liseries_check_pending_irqs
 ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
 	TRACE_AND_RESTORE_IRQ(r5);
@@ -623,7 +623,7 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 
 #endif /* CONFIG_PPC_BOOK3E */
 
-iseries_check_pending_irqs:
+.Liseries_check_pending_irqs:
 #ifdef CONFIG_PPC_ISERIES
 	ld	r5,SOFTE(r1)
 	cmpdi	0,r5,0
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1808876edcc9..c7eb4e0eb86c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -185,12 +185,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	 * prolog code of the PerformanceMonitor one. A little
 	 * trickery is thus necessary
 	 */
+performance_monitor_pSeries_1:
 	. = 0xf00
 	b	performance_monitor_pSeries
 
+altivec_unavailable_pSeries_1:
 	. = 0xf20
 	b	altivec_unavailable_pSeries
 
+vsx_unavailable_pSeries_1:
 	. = 0xf40
 	b	vsx_unavailable_pSeries
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index e5d121177984..02a334662cc0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -70,6 +70,8 @@
 #include <asm/firmware.h>
 #include <asm/lv1call.h>
 #endif
+#define CREATE_TRACE_POINTS
+#include <asm/trace.h>
 
 int __irq_offset_value;
 static int ppc_spurious_interrupts;
@@ -325,6 +327,8 @@ void do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned int irq;
 
+	trace_irq_entry(regs);
+
 	irq_enter();
 
 	check_stack_overflow();
@@ -348,6 +352,8 @@ void do_IRQ(struct pt_regs *regs)
 		timer_interrupt(regs);
 	}
 #endif
+
+	trace_irq_exit(regs);
 }
 
 void __init init_IRQ(void)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 87f1663584b0..1eb85fbf53a5 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1165,7 +1165,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 */
 	if (record) {
 		struct perf_sample_data data = {
-			.addr	= 0,
+			.addr	= ~0ULL,
 			.period	= event->hw.last_period,
 		};
 
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 0f4c1c73a6ad..199de527d411 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -73,10 +73,6 @@
 #define MMCR1_PMCSEL_MSK	0x7f
 
 /*
- * Bits in MMCRA
- */
-
-/*
  * Layout of constraint bits:
  * 6666555555555544444444443333333333222222222211111111110000000000
  * 3210987654321098765432109876543210987654321098765432109876543210
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index c351b3a57fbb..98b6a729a9dd 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -73,10 +73,6 @@
 #define MMCR1_PMCSEL_MSK	0x7f
 
 /*
- * Bits in MMCRA
- */
-
-/*
  * Layout of constraint bits:
  * 6666555555555544444444443333333333222222222211111111110000000000
  * 3210987654321098765432109876543210987654321098765432109876543210
@@ -390,7 +386,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
 			       unsigned int hwc[], unsigned long mmcr[])
 {
 	unsigned long mmcr1 = 0;
-	unsigned long mmcra = 0;
+	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
 	unsigned int pmc, unit, byte, psel;
 	unsigned int ttm, grp;
 	int i, isbus, bit, grsel;
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index ca399ba5034c..84a607bda8fb 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -178,7 +178,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
 			   unsigned int hwc[], unsigned long mmcr[])
 {
 	unsigned long mmcr1 = 0;
-	unsigned long mmcra = 0;
+	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
 	int i;
 	unsigned int pmc, ev, b, u, s, psel;
 	unsigned int ttmset = 0;
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 28a4daacdc02..852f7b7f6b40 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -51,10 +51,6 @@
 #define MMCR1_PMCSEL_MSK	0xff
 
 /*
- * Bits in MMCRA
- */
-
-/*
  * Layout of constraint bits:
  * 6666555555555544444444443333333333222222222211111111110000000000
  * 3210987654321098765432109876543210987654321098765432109876543210
@@ -230,7 +226,7 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
 			       unsigned int hwc[], unsigned long mmcr[])
 {
 	unsigned long mmcr1 = 0;
-	unsigned long mmcra = 0;
+	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
 	unsigned int pmc, unit, combine, l2sel, psel;
 	unsigned int pmc_inuse = 0;
 	int i;
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 479574413a93..8eff48e20dba 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -84,10 +84,6 @@ static short mmcr1_adder_bits[8] = {
 };
 
 /*
- * Bits in MMCRA
- */
-
-/*
  * Layout of constraint bits:
  * 6666555555555544444444443333333333222222222211111111110000000000
  * 3210987654321098765432109876543210987654321098765432109876543210
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 4271f7a655a3..845c72ab7357 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -660,6 +660,7 @@ late_initcall(check_cache_coherency);
 
 #ifdef CONFIG_DEBUG_FS
 struct dentry *powerpc_debugfs_root;
+EXPORT_SYMBOL(powerpc_debugfs_root);
 
 static int powerpc_debugfs_init(void)
 {
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a136a11c490d..36707dec94d7 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -54,6 +54,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/perf_event.h>
+#include <asm/trace.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -571,6 +572,8 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	trace_timer_interrupt_entry(regs);
+
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continuue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
@@ -590,6 +593,7 @@ void timer_interrupt(struct pt_regs * regs)
 		now = decrementer->next_tb - now;
 		if (now <= DECREMENTER_MAX)
 			set_dec((int)now);
+		trace_timer_interrupt_exit(regs);
 		return;
 	}
 	old_regs = set_irq_regs(regs);
@@ -620,6 +624,8 @@ void timer_interrupt(struct pt_regs * regs)
 
 	irq_exit();
 	set_irq_regs(old_regs);
+
+	trace_timer_interrupt_exit(regs);
 }
 
 void wakeup_decrementer(void)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6f0ae1a9bfae..9d1f9354d6ca 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -759,7 +759,7 @@ static int emulate_instruction(struct pt_regs *regs)
 
 	/* Emulate the mfspr rD, PVR. */
 	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
-		PPC_WARN_EMULATED(mfpvr);
+		PPC_WARN_EMULATED(mfpvr, regs);
 		rd = (instword >> 21) & 0x1f;
 		regs->gpr[rd] = mfspr(SPRN_PVR);
 		return 0;
@@ -767,7 +767,7 @@ static int emulate_instruction(struct pt_regs *regs)
 
 	/* Emulating the dcba insn is just a no-op.  */
 	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
-		PPC_WARN_EMULATED(dcba);
+		PPC_WARN_EMULATED(dcba, regs);
 		return 0;
 	}
 
@@ -776,7 +776,7 @@ static int emulate_instruction(struct pt_regs *regs)
 		int shift = (instword >> 21) & 0x1c;
 		unsigned long msk = 0xf0000000UL >> shift;
 
-		PPC_WARN_EMULATED(mcrxr);
+		PPC_WARN_EMULATED(mcrxr, regs);
 		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
 		regs->xer &= ~0xf0000000UL;
 		return 0;
@@ -784,19 +784,19 @@ static int emulate_instruction(struct pt_regs *regs)
 
 	/* Emulate load/store string insn. */
 	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
-		PPC_WARN_EMULATED(string);
+		PPC_WARN_EMULATED(string, regs);
 		return emulate_string_inst(regs, instword);
 	}
 
 	/* Emulate the popcntb (Population Count Bytes) instruction. */
 	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
-		PPC_WARN_EMULATED(popcntb);
+		PPC_WARN_EMULATED(popcntb, regs);
 		return emulate_popcntb_inst(regs, instword);
 	}
 
 	/* Emulate isel (Integer Select) instruction */
 	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
-		PPC_WARN_EMULATED(isel);
+		PPC_WARN_EMULATED(isel, regs);
 		return emulate_isel(regs, instword);
 	}
 
@@ -995,7 +995,7 @@ void SoftwareEmulation(struct pt_regs *regs)
 #ifdef CONFIG_MATH_EMULATION
 	errcode = do_mathemu(regs);
 	if (errcode >= 0)
-		PPC_WARN_EMULATED(math);
+		PPC_WARN_EMULATED(math, regs);
 
 	switch (errcode) {
 	case 0:
@@ -1018,7 +1018,7 @@ void SoftwareEmulation(struct pt_regs *regs)
 #elif defined(CONFIG_8XX_MINIMAL_FPEMU)
 	errcode = Soft_emulate_8xx(regs);
 	if (errcode >= 0)
-		PPC_WARN_EMULATED(8xx);
+		PPC_WARN_EMULATED(8xx, regs);
 
 	switch (errcode) {
 	case 0:
@@ -1129,7 +1129,7 @@ void altivec_assist_exception(struct pt_regs *regs)
 
 	flush_altivec_to_thread(current);
 
-	PPC_WARN_EMULATED(altivec);
+	PPC_WARN_EMULATED(altivec, regs);
 	err = emulate_altivec(regs);
 	if (err == 0) {
 		regs->nip += 4;		/* skip emulated instruction */
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 75f3267fdc30..e68beac0a171 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -26,11 +26,11 @@ BEGIN_FTR_SECTION
 	srd	r8,r5,r11
 
 	mtctr	r8
-setup:
+.Lsetup:
 	dcbt	r9,r4
 	dcbz	r9,r3
 	add	r9,r9,r12
-	bdnz	setup
+	bdnz	.Lsetup
 END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ)
 	addi	r3,r3,-8
 	srdi	r8,r5,7		/* page is copied in 128 byte strides */
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c1427b3634ec..383a5d0e9818 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -14,68 +14,94 @@
 
 #define STK_PARM(i)	(48 + ((i)-3)*8)
 
-#ifdef CONFIG_HCALL_STATS
+#ifdef CONFIG_TRACEPOINTS
+
+	.section	".toc","aw"
+
+	.globl hcall_tracepoint_refcount
+hcall_tracepoint_refcount:
+	.llong	0
+
+	.section	".text"
+
 /*
  * precall must preserve all registers. use unused STK_PARM()
- * areas to save snapshots and opcode.
+ * areas to save snapshots and opcode. We branch around this
+ * in early init (eg when populating the MMU hashtable) by using an
+ * unconditional cpu feature.
  */
-#define HCALL_INST_PRECALL					\
-	std	r3,STK_PARM(r3)(r1);	/* save opcode */	\
-	mftb	r0;			/* get timebase and */	\
-	std	r0,STK_PARM(r5)(r1);	/* save for later */	\
+#define HCALL_INST_PRECALL(FIRST_REG)				\
 BEGIN_FTR_SECTION;						\
-	mfspr	r0,SPRN_PURR;		/* get PURR and */	\
-	std	r0,STK_PARM(r6)(r1);	/* save for later */	\
-END_FTR_SECTION_IFSET(CPU_FTR_PURR);
-
+	b	1f;						\
+END_FTR_SECTION(0, 1);						\
+	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
+	cmpdi	r12,0;						\
+	beq+	1f;						\
+	mflr	r0;						\
+	std	r3,STK_PARM(r3)(r1);				\
+	std	r4,STK_PARM(r4)(r1);				\
+	std	r5,STK_PARM(r5)(r1);				\
+	std	r6,STK_PARM(r6)(r1);				\
+	std	r7,STK_PARM(r7)(r1);				\
+	std	r8,STK_PARM(r8)(r1);				\
+	std	r9,STK_PARM(r9)(r1);				\
+	std	r10,STK_PARM(r10)(r1);				\
+	std	r0,16(r1);					\
+	addi	r4,r1,STK_PARM(FIRST_REG);			\
+	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
+	bl	.__trace_hcall_entry;				\
+	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
+	ld	r0,16(r1);					\
+	ld	r3,STK_PARM(r3)(r1);				\
+	ld	r4,STK_PARM(r4)(r1);				\
+	ld	r5,STK_PARM(r5)(r1);				\
+	ld	r6,STK_PARM(r6)(r1);				\
+	ld	r7,STK_PARM(r7)(r1);				\
+	ld	r8,STK_PARM(r8)(r1);				\
+	ld	r9,STK_PARM(r9)(r1);				\
+	ld	r10,STK_PARM(r10)(r1);				\
+	mtlr	r0;						\
+1:
+
 /*
  * postcall is performed immediately before function return which
  * allows liberal use of volatile registers. We branch around this
  * in early init (eg when populating the MMU hashtable) by using an
  * unconditional cpu feature.
  */
-#define HCALL_INST_POSTCALL					\
+#define __HCALL_INST_POSTCALL					\
 BEGIN_FTR_SECTION;						\
 	b	1f;						\
 END_FTR_SECTION(0, 1);						\
-	ld	r4,STK_PARM(r3)(r1);	/* validate opcode */	\
-	cmpldi	cr7,r4,MAX_HCALL_OPCODE;			\
-	bgt-	cr7,1f;						\
-								\
-	/* get time and PURR snapshots after hcall */		\
-	mftb	r7;			/* timebase after */	\
-BEGIN_FTR_SECTION;						\
-	mfspr	r8,SPRN_PURR;		/* PURR after */	\
-	ld	r6,STK_PARM(r6)(r1);	/* PURR before */	\
-	subf	r6,r6,r8;		/* delta */		\
-END_FTR_SECTION_IFSET(CPU_FTR_PURR);				\
-	ld	r5,STK_PARM(r5)(r1);	/* timebase before */	\
-	subf	r5,r5,r7;		/* time delta */	\
-								\
-	/* calculate address of stat structure r4 = opcode */	\
-	srdi	r4,r4,2;		/* index into array */	\
-	mulli	r4,r4,HCALL_STAT_SIZE;				\
-	LOAD_REG_ADDR(r7, per_cpu__hcall_stats);		\
-	add	r4,r4,r7;					\
-	ld	r7,PACA_DATA_OFFSET(r13); /* per cpu offset */	\
-	add	r4,r4,r7;					\
-								\
-	/* update stats */					\
-	ld	r7,HCALL_STAT_CALLS(r4); /* count */		\
-	addi	r7,r7,1;					\
-	std	r7,HCALL_STAT_CALLS(r4);			\
-	ld	r7,HCALL_STAT_TB(r4);	/* timebase */		\
-	add	r7,r7,r5;					\
-	std	r7,HCALL_STAT_TB(r4);				\
-BEGIN_FTR_SECTION;						\
-	ld	r7,HCALL_STAT_PURR(r4);	/* PURR */		\
-	add	r7,r7,r6;					\
-	std	r7,HCALL_STAT_PURR(r4);				\
-END_FTR_SECTION_IFSET(CPU_FTR_PURR);				\
+	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
+	cmpdi	r12,0;						\
+	beq+	1f;						\
+	mflr	r0;						\
+	ld	r6,STK_PARM(r3)(r1);				\
+	std	r3,STK_PARM(r3)(r1);				\
+	mr	r4,r3;						\
+	mr	r3,r6;						\
+	std	r0,16(r1);					\
+	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
+	bl	.__trace_hcall_exit;				\
+	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
+	ld	r0,16(r1);					\
+	ld	r3,STK_PARM(r3)(r1);				\
+	mtlr	r0;						\
 1:
+
+#define HCALL_INST_POSTCALL_NORETS				\
+	li	r5,0;						\
+	__HCALL_INST_POSTCALL
+
+#define HCALL_INST_POSTCALL(BUFREG)				\
+	mr	r5,BUFREG;					\
+	__HCALL_INST_POSTCALL
+
 #else
-#define HCALL_INST_PRECALL
-#define HCALL_INST_POSTCALL
+#define HCALL_INST_PRECALL(FIRST_ARG)
+#define HCALL_INST_POSTCALL_NORETS
+#define HCALL_INST_POSTCALL(BUFREG)
 #endif
 
 	.text
@@ -86,11 +112,11 @@ _GLOBAL(plpar_hcall_norets)
 	mfcr	r0
 	stw	r0,8(r1)
 
-	HCALL_INST_PRECALL
+	HCALL_INST_PRECALL(r4)
 
 	HVSC				/* invoke the hypervisor */
 
-	HCALL_INST_POSTCALL
+	HCALL_INST_POSTCALL_NORETS
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
@@ -102,7 +128,7 @@ _GLOBAL(plpar_hcall)
 	mfcr	r0
 	stw	r0,8(r1)
 
-	HCALL_INST_PRECALL
+	HCALL_INST_PRECALL(r5)
 
 	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */
 
@@ -121,7 +147,7 @@ _GLOBAL(plpar_hcall)
 	std	r6, 16(r12)
 	std	r7, 24(r12)
 
-	HCALL_INST_POSTCALL
+	HCALL_INST_POSTCALL(r12)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
@@ -168,7 +194,7 @@ _GLOBAL(plpar_hcall9)
 	mfcr	r0
 	stw	r0,8(r1)
 
-	HCALL_INST_PRECALL
+	HCALL_INST_PRECALL(r5)
 
 	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */
 
@@ -196,7 +222,7 @@ _GLOBAL(plpar_hcall9)
 	std	r11,56(r12)
 	std	r0, 64(r12)
 
-	HCALL_INST_POSTCALL
+	HCALL_INST_POSTCALL(r12)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
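
The reworked HCALL_INST_PRECALL/HCALL_INST_POSTCALL macros above replace the old inline statistics code with calls to __trace_hcall_entry()/__trace_hcall_exit(), guarded by a single load of hcall_tracepoint_refcount from the TOC so the untraced fast path costs only one load and a branch. A rough C-level equivalent of that control flow (do_hvsc() is a made-up stand-in for the HVSC instruction sequence; the real implementation stays in the assembly above) would be:

	extern long hcall_tracepoint_refcount;

	static unsigned long traced_hcall(unsigned long opcode, unsigned long *args,
					  unsigned long *retbuf)
	{
		unsigned long rc;

		if (unlikely(hcall_tracepoint_refcount))	/* single TOC load */
			__trace_hcall_entry(opcode, args);

		rc = do_hvsc(opcode, args, retbuf);		/* the actual hypervisor call */

		if (unlikely(hcall_tracepoint_refcount))
			__trace_hcall_exit(opcode, rc, retbuf);

		return rc;
	}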
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 3631a4f277eb..2f58c71b7259 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -26,6 +26,7 @@
 #include <asm/hvcall.h>
 #include <asm/firmware.h>
 #include <asm/cputable.h>
+#include <asm/trace.h>
 
 DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
 
@@ -100,6 +101,35 @@ static const struct file_operations hcall_inst_seq_fops = {
 #define	HCALL_ROOT_DIR		"hcall_inst"
 #define CPU_NAME_BUF_SIZE	32
 
+
+static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
+{
+	struct hcall_stats *h;
+
+	if (opcode > MAX_HCALL_OPCODE)
+		return;
+
+	h = &get_cpu_var(hcall_stats)[opcode / 4];
+	h->tb_start = mftb();
+	h->purr_start = mfspr(SPRN_PURR);
+}
+
+static void probe_hcall_exit(unsigned long opcode, unsigned long retval,
+			     unsigned long *retbuf)
+{
+	struct hcall_stats *h;
+
+	if (opcode > MAX_HCALL_OPCODE)
+		return;
+
+	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+	h->num_calls++;
+	h->tb_total = mftb() - h->tb_start;
+	h->purr_total = mfspr(SPRN_PURR) - h->purr_start;
+
+	put_cpu_var(hcall_stats);
+}
+
 static int __init hcall_inst_init(void)
 {
 	struct dentry *hcall_root;
@@ -110,6 +140,14 @@ static int __init hcall_inst_init(void)
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		return 0;
 
+	if (register_trace_hcall_entry(probe_hcall_entry))
+		return -EINVAL;
+
+	if (register_trace_hcall_exit(probe_hcall_exit)) {
+		unregister_trace_hcall_entry(probe_hcall_entry);
+		return -EINVAL;
+	}
+
 	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);
 	if (!hcall_root)
 		return -ENOMEM;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 903eb9eec687..0707653612ba 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -39,6 +39,7 @@
 #include <asm/cputable.h>
 #include <asm/udbg.h>
 #include <asm/smp.h>
+#include <asm/trace.h>
 
 #include "plpar_wrappers.h"
 #include "pseries.h"
@@ -661,3 +662,35 @@ void arch_free_page(struct page *page, int order)
 EXPORT_SYMBOL(arch_free_page);
 
 #endif
+
+#ifdef CONFIG_TRACEPOINTS
+/*
+ * We optimise our hcall path by placing hcall_tracepoint_refcount
+ * directly in the TOC so we can check if the hcall tracepoints are
+ * enabled via a single load.
+ */
+
+/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
+extern long hcall_tracepoint_refcount;
+
+void hcall_tracepoint_regfunc(void)
+{
+	hcall_tracepoint_refcount++;
+}
+
+void hcall_tracepoint_unregfunc(void)
+{
+	hcall_tracepoint_refcount--;
+}
+
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+{
+	trace_hcall_entry(opcode, args);
+}
+
+void __trace_hcall_exit(long opcode, unsigned long retval,
+			unsigned long *retbuf)
+{
+	trace_hcall_exit(opcode, retval, retbuf);
+}
+#endif