author     Alexander Yarygin <yarygin@linux.vnet.ibm.com>	2014-07-03 10:29:05 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>	2014-07-16 16:57:32 -0400
commit     9daa81239e60c162153fb2a365b8492c9a9bf632
tree       a3f2fda3d42857f6c2d5188c308e02474d641eef	/tools/perf/builtin-kvm.c
parent     44b3802122174ba499613bac3aab2e66e948ce1e
perf kvm: Move arch specific code into arch/
Parts of the 'perf kvm stat' code make sense only for x86. Let's move this code into the arch/x86/kvm-stat.c file and add util/kvm-stat.h for the generic structure definitions.

Add a global array 'kvm_reg_events_ops' for accessing the arch-specific 'kvm_events_ops' from generic code.

Since several global arrays (e.g. 'kvm_events_tp') have been moved to arch/*, we can no longer know their sizes and use them directly in builtin-kvm.c. This patch fixes that by adding a terminating NULL element to each array and changing the behavior of their handlers in generic code accordingly.

Reviewed-by: David Ahern <dsahern@gmail.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1404397747-20939-3-git-send-email-yarygin@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-kvm.c')
-rw-r--r--	tools/perf/builtin-kvm.c	297
1 file changed, 34 insertions(+), 263 deletions(-)
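Before the diff itself, a hedged sketch of what the moved, NULL-terminated tables presumably look like on the arch side. The entries, the "vmexit"/"mmio"/"ioport" names and the exit_events/mmio_events/ioport_events handlers are taken from the code removed below; the exact contents and layout of the arch file are an assumption, not part of this patch:

/*
 * Sketch only -- not taken from this patch.  It shows the shape of the
 * NULL-terminated tables each arch is now expected to provide (the real
 * definitions end up under tools/perf/arch/<arch>/util/kvm-stat.c, with
 * the types declared in util/kvm-stat.h).  Field names are inferred from
 * their use in register_kvm_events_ops() and kvm_events_record() below.
 */
struct kvm_events_ops;				/* is_begin_event/is_end_event/decode_key/name */

struct kvm_reg_events_ops {
	const char *name;			/* matched against kvm->report_event */
	struct kvm_events_ops *ops;
};

extern struct kvm_events_ops exit_events;	/* "VM-EXIT" handlers, now arch code */
extern struct kvm_events_ops mmio_events;	/* "MMIO Access" handlers */
extern struct kvm_events_ops ioport_events;	/* "IO Port Access" handlers */

/* Tracepoints to record: the trailing NULL replaces ARRAY_SIZE() in generic code. */
const char * const kvm_events_tp[] = {
	"kvm:kvm_entry",
	"kvm:kvm_exit",
	"kvm:kvm_mmio",
	"kvm:kvm_pio",
	NULL,
};

/* Report-event handlers, likewise terminated by an empty entry. */
struct kvm_reg_events_ops kvm_reg_events_ops[] = {
	{ .name = "vmexit", .ops = &exit_events },
	{ .name = "mmio",   .ops = &mmio_events },
	{ .name = "ioport", .ops = &ioport_events },
	{ .name = NULL,     .ops = NULL },
};

With those terminators in place, the generic code below no longer needs ARRAY_SIZE(): register_kvm_events_ops() walks kvm_reg_events_ops until ->name is NULL, and kvm_events_record()/kvm_live_event_list() walk kvm_events_tp until the NULL entry.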
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 6d73346ef2a6..75ee8c1a6baf 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -31,109 +31,23 @@
 
 #ifdef HAVE_KVM_STAT_SUPPORT
 #include <asm/kvm_perf.h>
+#include "util/kvm-stat.h"
 
-struct event_key {
-	#define INVALID_KEY (~0ULL)
-	u64 key;
-	int info;
-};
-
-struct kvm_event_stats {
-	u64 time;
-	struct stats stats;
-};
-
-struct kvm_event {
-	struct list_head hash_entry;
-	struct rb_node rb;
-
-	struct event_key key;
-
-	struct kvm_event_stats total;
-
-	#define DEFAULT_VCPU_NUM 8
-	int max_vcpu;
-	struct kvm_event_stats *vcpu;
-};
-
-typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
-
-struct kvm_event_key {
-	const char *name;
-	key_cmp_fun key;
-};
-
-
-struct perf_kvm_stat;
-
-struct kvm_events_ops {
-	bool (*is_begin_event)(struct perf_evsel *evsel,
-			       struct perf_sample *sample,
-			       struct event_key *key);
-	bool (*is_end_event)(struct perf_evsel *evsel,
-			     struct perf_sample *sample, struct event_key *key);
-	void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
-			   char *decode);
-	const char *name;
-};
-
-struct exit_reasons_table {
-	unsigned long exit_code;
-	const char *reason;
-};
-
-#define EVENTS_BITS 12
-#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
-
-struct perf_kvm_stat {
-	struct perf_tool tool;
-	struct record_opts opts;
-	struct perf_evlist *evlist;
-	struct perf_session *session;
-
-	const char *file_name;
-	const char *report_event;
-	const char *sort_key;
-	int trace_vcpu;
-
-	struct exit_reasons_table *exit_reasons;
-	const char *exit_reasons_isa;
-
-	struct kvm_events_ops *events_ops;
-	key_cmp_fun compare;
-	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
-
-	u64 total_time;
-	u64 total_count;
-	u64 lost_events;
-	u64 duration;
-
-	const char *pid_str;
-	struct intlist *pid_list;
-
-	struct rb_root result;
-
-	int timerfd;
-	unsigned int display_time;
-	bool live;
-};
-
-
-static void exit_event_get_key(struct perf_evsel *evsel,
-			       struct perf_sample *sample,
-			       struct event_key *key)
+void exit_event_get_key(struct perf_evsel *evsel,
+			struct perf_sample *sample,
+			struct event_key *key)
 {
 	key->info = 0;
 	key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);
 }
 
-static bool kvm_exit_event(struct perf_evsel *evsel)
+bool kvm_exit_event(struct perf_evsel *evsel)
 {
 	return !strcmp(evsel->name, KVM_EXIT_TRACE);
 }
 
-static bool exit_event_begin(struct perf_evsel *evsel,
-			     struct perf_sample *sample, struct event_key *key)
+bool exit_event_begin(struct perf_evsel *evsel,
+		      struct perf_sample *sample, struct event_key *key)
 {
 	if (kvm_exit_event(evsel)) {
 		exit_event_get_key(evsel, sample, key);
@@ -143,26 +57,18 @@ static bool exit_event_begin(struct perf_evsel *evsel,
 	return false;
 }
 
-static bool kvm_entry_event(struct perf_evsel *evsel)
+bool kvm_entry_event(struct perf_evsel *evsel)
 {
 	return !strcmp(evsel->name, KVM_ENTRY_TRACE);
 }
 
-static bool exit_event_end(struct perf_evsel *evsel,
-			   struct perf_sample *sample __maybe_unused,
-			   struct event_key *key __maybe_unused)
+bool exit_event_end(struct perf_evsel *evsel,
+		    struct perf_sample *sample __maybe_unused,
+		    struct event_key *key __maybe_unused)
 {
 	return kvm_entry_event(evsel);
 }
 
-#define define_exit_reasons_table(name, symbols)	\
-	static struct exit_reasons_table name[] = {	\
-		symbols, { -1, NULL }			\
-	}
-
-define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
-define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
-
 static const char *get_exit_reason(struct perf_kvm_stat *kvm,
 				   struct exit_reasons_table *tbl,
 				   u64 exit_code)
@@ -178,9 +84,9 @@ static const char *get_exit_reason(struct perf_kvm_stat *kvm,
 	return "UNKNOWN";
 }
 
-static void exit_event_decode_key(struct perf_kvm_stat *kvm,
-				  struct event_key *key,
-				  char *decode)
+void exit_event_decode_key(struct perf_kvm_stat *kvm,
+			   struct event_key *key,
+			   char *decode)
 {
 	const char *exit_reason = get_exit_reason(kvm, kvm->exit_reasons,
 						  key->key);
@@ -188,139 +94,20 @@ static void exit_event_decode_key(struct perf_kvm_stat *kvm,
 	scnprintf(decode, DECODE_STR_LEN, "%s", exit_reason);
 }
 
-static struct kvm_events_ops exit_events = {
-	.is_begin_event = exit_event_begin,
-	.is_end_event = exit_event_end,
-	.decode_key = exit_event_decode_key,
-	.name = "VM-EXIT"
-};
-
-/*
- * For the mmio events, we treat:
- * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
- * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
- */
-static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
-			       struct event_key *key)
-{
-	key->key = perf_evsel__intval(evsel, sample, "gpa");
-	key->info = perf_evsel__intval(evsel, sample, "type");
-}
-
-#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
-#define KVM_TRACE_MMIO_READ 1
-#define KVM_TRACE_MMIO_WRITE 2
-
-static bool mmio_event_begin(struct perf_evsel *evsel,
-			     struct perf_sample *sample, struct event_key *key)
-{
-	/* MMIO read begin event in kernel. */
-	if (kvm_exit_event(evsel))
-		return true;
-
-	/* MMIO write begin event in kernel. */
-	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
-	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
-		mmio_event_get_key(evsel, sample, key);
-		return true;
-	}
-
-	return false;
-}
-
-static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
-			   struct event_key *key)
-{
-	/* MMIO write end event in kernel. */
-	if (kvm_entry_event(evsel))
-		return true;
-
-	/* MMIO read end event in kernel.*/
-	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
-	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
-		mmio_event_get_key(evsel, sample, key);
-		return true;
-	}
-
-	return false;
-}
-
-static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
-				  struct event_key *key,
-				  char *decode)
-{
-	scnprintf(decode, DECODE_STR_LEN, "%#lx:%s", (unsigned long)key->key,
-		  key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
-}
-
-static struct kvm_events_ops mmio_events = {
-	.is_begin_event = mmio_event_begin,
-	.is_end_event = mmio_event_end,
-	.decode_key = mmio_event_decode_key,
-	.name = "MMIO Access"
-};
-
- /* The time of emulation pio access is from kvm_pio to kvm_entry. */
-static void ioport_event_get_key(struct perf_evsel *evsel,
-				 struct perf_sample *sample,
-				 struct event_key *key)
+static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
 {
-	key->key = perf_evsel__intval(evsel, sample, "port");
-	key->info = perf_evsel__intval(evsel, sample, "rw");
-}
+	struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
 
-static bool ioport_event_begin(struct perf_evsel *evsel,
-			       struct perf_sample *sample,
-			       struct event_key *key)
-{
-	if (!strcmp(evsel->name, "kvm:kvm_pio")) {
-		ioport_event_get_key(evsel, sample, key);
-		return true;
+	for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
+		if (!strcmp(events_ops->name, kvm->report_event)) {
+			kvm->events_ops = events_ops->ops;
+			return true;
+		}
 	}
 
 	return false;
 }
 
-static bool ioport_event_end(struct perf_evsel *evsel,
-			     struct perf_sample *sample __maybe_unused,
-			     struct event_key *key __maybe_unused)
-{
-	return kvm_entry_event(evsel);
-}
-
-static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
-				    struct event_key *key,
-				    char *decode)
-{
-	scnprintf(decode, DECODE_STR_LEN, "%#llx:%s", (unsigned long long)key->key,
-		  key->info ? "POUT" : "PIN");
-}
-
-static struct kvm_events_ops ioport_events = {
-	.is_begin_event = ioport_event_begin,
-	.is_end_event = ioport_event_end,
-	.decode_key = ioport_event_decode_key,
-	.name = "IO Port Access"
-};
-
-static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
-{
-	bool ret = true;
-
-	if (!strcmp(kvm->report_event, "vmexit"))
-		kvm->events_ops = &exit_events;
-	else if (!strcmp(kvm->report_event, "mmio"))
-		kvm->events_ops = &mmio_events;
-	else if (!strcmp(kvm->report_event, "ioport"))
-		kvm->events_ops = &ioport_events;
-	else {
-		pr_err("Unknown report event:%s\n", kvm->report_event);
-		ret = false;
-	}
-
-	return ret;
-}
-
 struct vcpu_event_record {
 	int vcpu_id;
 	u64 start_time;
@@ -833,20 +620,6 @@ static int process_sample_event(struct perf_tool *tool,
 	return 0;
 }
 
-static int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
-{
-	if (strstr(cpuid, "Intel")) {
-		kvm->exit_reasons = vmx_exit_reasons;
-		kvm->exit_reasons_isa = "VMX";
-	} else if (strstr(cpuid, "AMD")) {
-		kvm->exit_reasons = svm_exit_reasons;
-		kvm->exit_reasons_isa = "SVM";
-	} else
-		return -ENOTSUP;
-
-	return 0;
-}
-
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
 {
 	char buf[64], *cpuid;
@@ -1305,13 +1078,6 @@ exit:
 	return ret;
 }
 
-static const char * const kvm_events_tp[] = {
-	"kvm:kvm_entry",
-	"kvm:kvm_exit",
-	"kvm:kvm_mmio",
-	"kvm:kvm_pio",
-};
-
 #define STRDUP_FAIL_EXIT(s)		\
 	({	char *_p;		\
 		_p = strdup(s);		\
@@ -1323,7 +1089,7 @@ static const char * const kvm_events_tp[] = {
 static int
 kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
-	unsigned int rec_argc, i, j;
+	unsigned int rec_argc, i, j, events_tp_size;
 	const char **rec_argv;
 	const char * const record_args[] = {
 		"record",
@@ -1331,9 +1097,14 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
1331 "-m", "1024", 1097 "-m", "1024",
1332 "-c", "1", 1098 "-c", "1",
1333 }; 1099 };
1100 const char * const *events_tp;
1101 events_tp_size = 0;
1102
1103 for (events_tp = kvm_events_tp; *events_tp; events_tp++)
1104 events_tp_size++;
1334 1105
1335 rec_argc = ARRAY_SIZE(record_args) + argc + 2 + 1106 rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
1336 2 * ARRAY_SIZE(kvm_events_tp); 1107 2 * events_tp_size;
1337 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1108 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1338 1109
1339 if (rec_argv == NULL) 1110 if (rec_argv == NULL)
@@ -1342,7 +1113,7 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
 		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
 
-	for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+	for (j = 0; j < events_tp_size; j++) {
 		rec_argv[i++] = "-e";
 		rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
 	}
@@ -1396,16 +1167,16 @@ static struct perf_evlist *kvm_live_event_list(void)
 {
 	struct perf_evlist *evlist;
 	char *tp, *name, *sys;
-	unsigned int j;
 	int err = -1;
+	const char * const *events_tp;
 
 	evlist = perf_evlist__new();
 	if (evlist == NULL)
 		return NULL;
 
-	for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+	for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
 
-		tp = strdup(kvm_events_tp[j]);
+		tp = strdup(*events_tp);
 		if (tp == NULL)
 			goto out;
 
@@ -1414,7 +1185,7 @@ static struct perf_evlist *kvm_live_event_list(void)
 		name = strchr(tp, ':');
 		if (name == NULL) {
 			pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
-			       kvm_events_tp[j]);
+			       *events_tp);
 			free(tp);
 			goto out;
 		}
@@ -1422,7 +1193,7 @@ static struct perf_evlist *kvm_live_event_list(void)
 		name++;
 
 		if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
-			pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
+			pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
 			free(tp);
 			goto out;
 		}