Diffstat (limited to 'arch/powerpc/platforms/pseries')
 arch/powerpc/platforms/pseries/hvCall.S      | 132 +++++++++++++++----------
 arch/powerpc/platforms/pseries/hvCall_inst.c |  38 +++++++++
 arch/powerpc/platforms/pseries/lpar.c        |  33 ++++++++
 3 files changed, 150 insertions(+), 53 deletions(-)
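
Note: this diffstat is limited to arch/powerpc/platforms/pseries, so the tracepoint definitions the patch calls into (trace_hcall_entry/trace_hcall_exit and the generated register_trace_hcall_* hooks used below) fall outside the view. As a hedged sketch of what such a definition looks like — assuming the generic TRACE_EVENT_FN macro, which is the standard way to attach register/unregister callbacks (here, the hcall_tracepoint_regfunc/unregfunc added in lpar.c below) to a tracepoint:

	/* Sketch only; the real header may differ in fields and format. */
	TRACE_EVENT_FN(hcall_entry,

		TP_PROTO(unsigned long opcode, unsigned long *args),

		TP_ARGS(opcode, args),

		TP_STRUCT__entry(
			__field(unsigned long, opcode)
		),

		TP_fast_assign(
			__entry->opcode = opcode;
		),

		TP_printk("opcode=%lu", __entry->opcode),

		hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
	);

A matching hcall_exit event would additionally carry the return value and return buffer.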
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c1427b3634ec..383a5d0e9818 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -14,68 +14,94 @@
 
 #define STK_PARM(i)     (48 + ((i)-3)*8)
 
-#ifdef CONFIG_HCALL_STATS
+#ifdef CONFIG_TRACEPOINTS
+
+	.section	".toc","aw"
+
+	.globl hcall_tracepoint_refcount
+hcall_tracepoint_refcount:
+	.llong	0
+
+	.section	".text"
+
 /*
  * precall must preserve all registers.  use unused STK_PARM()
- * areas to save snapshots and opcode.
+ * areas to save snapshots and opcode. We branch around this
+ * in early init (eg when populating the MMU hashtable) by using an
+ * unconditional cpu feature.
  */
-#define HCALL_INST_PRECALL					\
-	std	r3,STK_PARM(r3)(r1);	/* save opcode */	\
-	mftb	r0;			/* get timebase and */	\
-	std	r0,STK_PARM(r5)(r1);	/* save for later */	\
+#define HCALL_INST_PRECALL(FIRST_REG)				\
 BEGIN_FTR_SECTION;						\
-	mfspr	r0,SPRN_PURR;		/* get PURR and */	\
-	std	r0,STK_PARM(r6)(r1);	/* save for later */	\
-END_FTR_SECTION_IFSET(CPU_FTR_PURR);
-
+	b	1f;						\
+END_FTR_SECTION(0, 1);						\
+	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
+	cmpdi	r12,0;						\
+	beq+	1f;						\
+	mflr	r0;						\
+	std	r3,STK_PARM(r3)(r1);				\
+	std	r4,STK_PARM(r4)(r1);				\
+	std	r5,STK_PARM(r5)(r1);				\
+	std	r6,STK_PARM(r6)(r1);				\
+	std	r7,STK_PARM(r7)(r1);				\
+	std	r8,STK_PARM(r8)(r1);				\
+	std	r9,STK_PARM(r9)(r1);				\
+	std	r10,STK_PARM(r10)(r1);				\
+	std	r0,16(r1);					\
+	addi	r4,r1,STK_PARM(FIRST_REG);			\
+	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
+	bl	.__trace_hcall_entry;				\
+	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
+	ld	r0,16(r1);					\
+	ld	r3,STK_PARM(r3)(r1);				\
+	ld	r4,STK_PARM(r4)(r1);				\
+	ld	r5,STK_PARM(r5)(r1);				\
+	ld	r6,STK_PARM(r6)(r1);				\
+	ld	r7,STK_PARM(r7)(r1);				\
+	ld	r8,STK_PARM(r8)(r1);				\
+	ld	r9,STK_PARM(r9)(r1);				\
+	ld	r10,STK_PARM(r10)(r1);				\
+	mtlr	r0;						\
+1:
+
 /*
  * postcall is performed immediately before function return which
  * allows liberal use of volatile registers. We branch around this
  * in early init (eg when populating the MMU hashtable) by using an
  * unconditional cpu feature.
  */
-#define HCALL_INST_POSTCALL					\
+#define __HCALL_INST_POSTCALL					\
 BEGIN_FTR_SECTION;						\
 	b	1f;						\
 END_FTR_SECTION(0, 1);						\
-	ld	r4,STK_PARM(r3)(r1);	/* validate opcode */	\
-	cmpldi	cr7,r4,MAX_HCALL_OPCODE;			\
-	bgt-	cr7,1f;						\
-								\
-	/* get time and PURR snapshots after hcall */		\
-	mftb	r7;			/* timebase after */	\
-BEGIN_FTR_SECTION;						\
-	mfspr	r8,SPRN_PURR;		/* PURR after */	\
-	ld	r6,STK_PARM(r6)(r1);	/* PURR before */	\
-	subf	r6,r6,r8;		/* delta */		\
-END_FTR_SECTION_IFSET(CPU_FTR_PURR);				\
-	ld	r5,STK_PARM(r5)(r1);	/* timebase before */	\
-	subf	r5,r5,r7;		/* time delta */	\
-								\
-	/* calculate address of stat structure r4 = opcode */	\
-	srdi	r4,r4,2;		/* index into array */	\
-	mulli	r4,r4,HCALL_STAT_SIZE;				\
-	LOAD_REG_ADDR(r7, per_cpu__hcall_stats);		\
-	add	r4,r4,r7;					\
-	ld	r7,PACA_DATA_OFFSET(r13); /* per cpu offset */	\
-	add	r4,r4,r7;					\
-								\
-	/* update stats	*/					\
-	ld	r7,HCALL_STAT_CALLS(r4); /* count */		\
-	addi	r7,r7,1;					\
-	std	r7,HCALL_STAT_CALLS(r4);			\
-	ld	r7,HCALL_STAT_TB(r4);	/* timebase */		\
-	add	r7,r7,r5;					\
-	std	r7,HCALL_STAT_TB(r4);				\
-BEGIN_FTR_SECTION;						\
-	ld	r7,HCALL_STAT_PURR(r4);	/* PURR */		\
-	add	r7,r7,r6;					\
-	std	r7,HCALL_STAT_PURR(r4);				\
-END_FTR_SECTION_IFSET(CPU_FTR_PURR);				\
+	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
+	cmpdi	r12,0;						\
+	beq+	1f;						\
+	mflr	r0;						\
+	ld	r6,STK_PARM(r3)(r1);				\
+	std	r3,STK_PARM(r3)(r1);				\
+	mr	r4,r3;						\
+	mr	r3,r6;						\
+	std	r0,16(r1);					\
+	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
+	bl	.__trace_hcall_exit;				\
+	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
+	ld	r0,16(r1);					\
+	ld	r3,STK_PARM(r3)(r1);				\
+	mtlr	r0;						\
 1:
+
+#define HCALL_INST_POSTCALL_NORETS				\
+	li	r5,0;						\
+	__HCALL_INST_POSTCALL
+
+#define HCALL_INST_POSTCALL(BUFREG)				\
+	mr	r5,BUFREG;					\
+	__HCALL_INST_POSTCALL
+
 #else
-#define HCALL_INST_PRECALL
-#define HCALL_INST_POSTCALL
+#define HCALL_INST_PRECALL(FIRST_ARG)
+#define HCALL_INST_POSTCALL_NORETS
+#define HCALL_INST_POSTCALL(BUFREG)
 #endif
 
 	.text
@@ -86,11 +112,11 @@ _GLOBAL(plpar_hcall_norets)
 	mfcr	r0
 	stw	r0,8(r1)
 
-	HCALL_INST_PRECALL
+	HCALL_INST_PRECALL(r4)
 
 	HVSC				/* invoke the hypervisor */
 
-	HCALL_INST_POSTCALL
+	HCALL_INST_POSTCALL_NORETS
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
@@ -102,7 +128,7 @@ _GLOBAL(plpar_hcall)
 	mfcr	r0
 	stw	r0,8(r1)
 
-	HCALL_INST_PRECALL
+	HCALL_INST_PRECALL(r5)
 
 	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */
 
@@ -121,7 +147,7 @@ _GLOBAL(plpar_hcall)
 	std	r6, 16(r12)
 	std	r7, 24(r12)
 
-	HCALL_INST_POSTCALL
+	HCALL_INST_POSTCALL(r12)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
@@ -168,7 +194,7 @@ _GLOBAL(plpar_hcall9)
 	mfcr	r0
 	stw	r0,8(r1)
 
-	HCALL_INST_PRECALL
+	HCALL_INST_PRECALL(r5)
 
 	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */
 
@@ -196,7 +222,7 @@ _GLOBAL(plpar_hcall9)
 	std	r11,56(r12)
 	std	r0, 64(r12)
 
-	HCALL_INST_POSTCALL
+	HCALL_INST_POSTCALL(r12)
 
 	lwz	r0,8(r1)
 	mtcrf	0xff,r0
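
For readers who do not speak ppc64 assembly, the PRECALL/POSTCALL macros above boil down to the C-shaped logic below. This is an illustrative sketch only: plpar_hcall_sketch and do_hvsc are invented names, and the real sequences are open-coded in assembly precisely because they must preserve every volatile register around the trace calls. The BEGIN_FTR_SECTION / END_FTR_SECTION(0, 1) pair is an always-false feature section: the "b 1f" it guards is live until CPU feature fixups run, which is how the tracing path is skipped during early init (eg while populating the MMU hashtable), and afterwards the branch is patched out so only the refcount test remains.

	extern long hcall_tracepoint_refcount;	/* kept in the TOC */

	void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
	void __trace_hcall_exit(long opcode, unsigned long retval,
				unsigned long *retbuf);

	/* Sketch of the instrumented hcall path: when tracing is off,
	 * the overhead is one TOC load plus a predicted-not-taken branch. */
	static long plpar_hcall_sketch(unsigned long opcode,
				       unsigned long *args,
				       unsigned long *retbuf)
	{
		long rc;

		if (unlikely(hcall_tracepoint_refcount))	/* HCALL_INST_PRECALL */
			__trace_hcall_entry(opcode, args);

		rc = do_hvsc(opcode, args, retbuf);		/* the HVSC instruction */

		if (unlikely(hcall_tracepoint_refcount))	/* HCALL_INST_POSTCALL(r12) */
			__trace_hcall_exit(opcode, rc, retbuf);

		return rc;
	}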
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 3631a4f277eb..2f58c71b7259 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -26,6 +26,7 @@
 #include <asm/hvcall.h>
 #include <asm/firmware.h>
 #include <asm/cputable.h>
+#include <asm/trace.h>
 
 DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
 
@@ -100,6 +101,35 @@ static const struct file_operations hcall_inst_seq_fops = {
 #define	HCALL_ROOT_DIR		"hcall_inst"
 #define CPU_NAME_BUF_SIZE	32
 
+
+static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
+{
+	struct hcall_stats *h;
+
+	if (opcode > MAX_HCALL_OPCODE)
+		return;
+
+	h = &get_cpu_var(hcall_stats)[opcode / 4];
+	h->tb_start = mftb();
+	h->purr_start = mfspr(SPRN_PURR);
+}
+
+static void probe_hcall_exit(unsigned long opcode, unsigned long retval,
+			     unsigned long *retbuf)
+{
+	struct hcall_stats *h;
+
+	if (opcode > MAX_HCALL_OPCODE)
+		return;
+
+	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+	h->num_calls++;
+	h->tb_total = mftb() - h->tb_start;
+	h->purr_total = mfspr(SPRN_PURR) - h->purr_start;
+
+	put_cpu_var(hcall_stats);
+}
+
 static int __init hcall_inst_init(void)
 {
 	struct dentry *hcall_root;
@@ -110,6 +140,14 @@ static int __init hcall_inst_init(void)
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		return 0;
 
+	if (register_trace_hcall_entry(probe_hcall_entry))
+		return -EINVAL;
+
+	if (register_trace_hcall_exit(probe_hcall_exit)) {
+		unregister_trace_hcall_entry(probe_hcall_entry);
+		return -EINVAL;
+	}
+
 	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);
 	if (!hcall_root)
 		return -ENOMEM;
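
Two details in the probes above are easy to miss. First, pSeries hcall numbers are spaced four apart (H_REMOVE is 0x04, H_ENTER 0x08, and so on), so opcode / 4 turns an opcode into a dense index for the per-cpu hcall_stats array. Second, get_cpu_var() in the entry probe disables preemption, and the matching put_cpu_var() is only issued in the exit probe, so the tb_start/purr_start snapshots and the deltas computed on exit come from the same CPU. A hypothetical helper (the name hcall_stats_slot is ours, not the patch's) makes the lookup explicit:

	/* Sketch of the bucket lookup shared by both probes. */
	static struct hcall_stats *hcall_stats_slot(unsigned long opcode)
	{
		if (opcode > MAX_HCALL_OPCODE)
			return NULL;
		/* opcodes are multiples of 4, so divide to index densely */
		return &__get_cpu_var(hcall_stats)[opcode / 4];
	}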
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 903eb9eec687..0707653612ba 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -39,6 +39,7 @@
 #include <asm/cputable.h>
 #include <asm/udbg.h>
 #include <asm/smp.h>
+#include <asm/trace.h>
 
 #include "plpar_wrappers.h"
 #include "pseries.h"
@@ -661,3 +662,35 @@ void arch_free_page(struct page *page, int order)
 EXPORT_SYMBOL(arch_free_page);
 
 #endif
+
+#ifdef CONFIG_TRACEPOINTS
+/*
+ * We optimise our hcall path by placing hcall_tracepoint_refcount
+ * directly in the TOC so we can check if the hcall tracepoints are
+ * enabled via a single load.
+ */
+
+/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
+extern long hcall_tracepoint_refcount;
+
+void hcall_tracepoint_regfunc(void)
+{
+	hcall_tracepoint_refcount++;
+}
+
+void hcall_tracepoint_unregfunc(void)
+{
+	hcall_tracepoint_refcount--;
+}
+
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+{
+	trace_hcall_entry(opcode, args);
+}
+
+void __trace_hcall_exit(long opcode, unsigned long retval,
+			unsigned long *retbuf)
+{
+	trace_hcall_exit(opcode, retval, retbuf);
+}
+#endif
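
Putting the pieces together: registering a probe on either hcall tracepoint runs hcall_tracepoint_regfunc(), which bumps hcall_tracepoint_refcount; the next hcall then falls through the feature section in hvCall.S and calls the __trace_hcall_entry/__trace_hcall_exit stubs above. A hypothetical consumer (the probe function and counter are invented for illustration; register_trace_hcall_exit() is generated by the tracepoint definition) would look roughly like:

	#include <asm/trace.h>

	/* Hypothetical probe: count hcalls that return a failure status. */
	static unsigned long hcall_error_count;

	static void count_hcall_errors(unsigned long opcode, unsigned long retval,
				       unsigned long *retbuf)
	{
		if ((long)retval < 0)
			hcall_error_count++;
	}

	static int __init hcall_error_probe_init(void)
	{
		/* Indirectly increments hcall_tracepoint_refcount. */
		return register_trace_hcall_exit(count_hcall_errors);
	}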