about summary refs log tree commit diff stats
path: root/tools/perf/util/machine.c
diff options
context:
space:
mode:
Diffstat (limited to 'tools/perf/util/machine.c')
-rw-r--r--  tools/perf/util/machine.c  139
1 file changed, 125 insertions, 14 deletions
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c73e1fc12e53..16bba9fff2c8 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -8,6 +8,7 @@
8#include "sort.h" 8#include "sort.h"
9#include "strlist.h" 9#include "strlist.h"
10#include "thread.h" 10#include "thread.h"
11#include "vdso.h"
11#include <stdbool.h> 12#include <stdbool.h>
12#include <symbol/kallsyms.h> 13#include <symbol/kallsyms.h>
13#include "unwind.h" 14#include "unwind.h"
@@ -23,6 +24,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
23 INIT_LIST_HEAD(&machine->dead_threads); 24 INIT_LIST_HEAD(&machine->dead_threads);
24 machine->last_match = NULL; 25 machine->last_match = NULL;
25 26
27 machine->vdso_info = NULL;
28
26 machine->kmaps.machine = machine; 29 machine->kmaps.machine = machine;
27 machine->pid = pid; 30 machine->pid = pid;
28 31
@@ -34,7 +37,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
34 return -ENOMEM; 37 return -ENOMEM;
35 38
36 if (pid != HOST_KERNEL_ID) { 39 if (pid != HOST_KERNEL_ID) {
37 struct thread *thread = machine__findnew_thread(machine, 0, 40 struct thread *thread = machine__findnew_thread(machine, -1,
38 pid); 41 pid);
39 char comm[64]; 42 char comm[64];
40 43
@@ -45,6 +48,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
45 thread__set_comm(thread, comm, 0); 48 thread__set_comm(thread, comm, 0);
46 } 49 }
47 50
51 machine->current_tid = NULL;
52
48 return 0; 53 return 0;
49} 54}
50 55
@@ -103,7 +108,9 @@ void machine__exit(struct machine *machine)
103 map_groups__exit(&machine->kmaps); 108 map_groups__exit(&machine->kmaps);
104 dsos__delete(&machine->user_dsos); 109 dsos__delete(&machine->user_dsos);
105 dsos__delete(&machine->kernel_dsos); 110 dsos__delete(&machine->kernel_dsos);
111 vdso__exit(machine);
106 zfree(&machine->root_dir); 112 zfree(&machine->root_dir);
113 zfree(&machine->current_tid);
107} 114}
108 115
109void machine__delete(struct machine *machine) 116void machine__delete(struct machine *machine)
@@ -272,6 +279,52 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
272 return; 279 return;
273} 280}
274 281
282static void machine__update_thread_pid(struct machine *machine,
283 struct thread *th, pid_t pid)
284{
285 struct thread *leader;
286
287 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
288 return;
289
290 th->pid_ = pid;
291
292 if (th->pid_ == th->tid)
293 return;
294
295 leader = machine__findnew_thread(machine, th->pid_, th->pid_);
296 if (!leader)
297 goto out_err;
298
299 if (!leader->mg)
300 leader->mg = map_groups__new();
301
302 if (!leader->mg)
303 goto out_err;
304
305 if (th->mg == leader->mg)
306 return;
307
308 if (th->mg) {
309 /*
310 * Maps are created from MMAP events which provide the pid and
311 * tid. Consequently there never should be any maps on a thread
312 * with an unknown pid. Just print an error if there are.
313 */
314 if (!map_groups__empty(th->mg))
315 pr_err("Discarding thread maps for %d:%d\n",
316 th->pid_, th->tid);
317 map_groups__delete(th->mg);
318 }
319
320 th->mg = map_groups__get(leader->mg);
321
322 return;
323
324out_err:
325 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
326}
327
275static struct thread *__machine__findnew_thread(struct machine *machine, 328static struct thread *__machine__findnew_thread(struct machine *machine,
276 pid_t pid, pid_t tid, 329 pid_t pid, pid_t tid,
277 bool create) 330 bool create)
@@ -285,10 +338,10 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
285 * so most of the time we dont have to look up 338 * so most of the time we dont have to look up
286 * the full rbtree: 339 * the full rbtree:
287 */ 340 */
288 if (machine->last_match && machine->last_match->tid == tid) { 341 th = machine->last_match;
289 if (pid && pid != machine->last_match->pid_) 342 if (th && th->tid == tid) {
290 machine->last_match->pid_ = pid; 343 machine__update_thread_pid(machine, th, pid);
291 return machine->last_match; 344 return th;
292 } 345 }
293 346
294 while (*p != NULL) { 347 while (*p != NULL) {
@@ -297,8 +350,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
297 350
298 if (th->tid == tid) { 351 if (th->tid == tid) {
299 machine->last_match = th; 352 machine->last_match = th;
300 if (pid && pid != th->pid_) 353 machine__update_thread_pid(machine, th, pid);
301 th->pid_ = pid;
302 return th; 354 return th;
303 } 355 }
304 356
@@ -325,8 +377,10 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
325 * within thread__init_map_groups to find the thread 377 * within thread__init_map_groups to find the thread
326 * leader and that would screwed the rb tree. 378 * leader and that would screwed the rb tree.
327 */ 379 */
328 if (thread__init_map_groups(th, machine)) 380 if (thread__init_map_groups(th, machine)) {
381 thread__delete(th);
329 return NULL; 382 return NULL;
383 }
330 } 384 }
331 385
332 return th; 386 return th;
@@ -1045,14 +1099,14 @@ int machine__process_mmap2_event(struct machine *machine,
1045 else 1099 else
1046 type = MAP__FUNCTION; 1100 type = MAP__FUNCTION;
1047 1101
1048 map = map__new(&machine->user_dsos, event->mmap2.start, 1102 map = map__new(machine, event->mmap2.start,
1049 event->mmap2.len, event->mmap2.pgoff, 1103 event->mmap2.len, event->mmap2.pgoff,
1050 event->mmap2.pid, event->mmap2.maj, 1104 event->mmap2.pid, event->mmap2.maj,
1051 event->mmap2.min, event->mmap2.ino, 1105 event->mmap2.min, event->mmap2.ino,
1052 event->mmap2.ino_generation, 1106 event->mmap2.ino_generation,
1053 event->mmap2.prot, 1107 event->mmap2.prot,
1054 event->mmap2.flags, 1108 event->mmap2.flags,
1055 event->mmap2.filename, type); 1109 event->mmap2.filename, type, thread);
1056 1110
1057 if (map == NULL) 1111 if (map == NULL)
1058 goto out_problem; 1112 goto out_problem;
@@ -1095,11 +1149,11 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
1095 else 1149 else
1096 type = MAP__FUNCTION; 1150 type = MAP__FUNCTION;
1097 1151
1098 map = map__new(&machine->user_dsos, event->mmap.start, 1152 map = map__new(machine, event->mmap.start,
1099 event->mmap.len, event->mmap.pgoff, 1153 event->mmap.len, event->mmap.pgoff,
1100 event->mmap.pid, 0, 0, 0, 0, 0, 0, 1154 event->mmap.pid, 0, 0, 0, 0, 0, 0,
1101 event->mmap.filename, 1155 event->mmap.filename,
1102 type); 1156 type, thread);
1103 1157
1104 if (map == NULL) 1158 if (map == NULL)
1105 goto out_problem; 1159 goto out_problem;
@@ -1281,7 +1335,9 @@ static int machine__resolve_callchain_sample(struct machine *machine,
1281 u8 cpumode = PERF_RECORD_MISC_USER; 1335 u8 cpumode = PERF_RECORD_MISC_USER;
1282 int chain_nr = min(max_stack, (int)chain->nr); 1336 int chain_nr = min(max_stack, (int)chain->nr);
1283 int i; 1337 int i;
1338 int j;
1284 int err; 1339 int err;
1340 int skip_idx __maybe_unused;
1285 1341
1286 callchain_cursor_reset(&callchain_cursor); 1342 callchain_cursor_reset(&callchain_cursor);
1287 1343
@@ -1290,14 +1346,26 @@ static int machine__resolve_callchain_sample(struct machine *machine,
1290 return 0; 1346 return 0;
1291 } 1347 }
1292 1348
1349 /*
1350 * Based on DWARF debug information, some architectures skip
1351 * a callchain entry saved by the kernel.
1352 */
1353 skip_idx = arch_skip_callchain_idx(machine, thread, chain);
1354
1293 for (i = 0; i < chain_nr; i++) { 1355 for (i = 0; i < chain_nr; i++) {
1294 u64 ip; 1356 u64 ip;
1295 struct addr_location al; 1357 struct addr_location al;
1296 1358
1297 if (callchain_param.order == ORDER_CALLEE) 1359 if (callchain_param.order == ORDER_CALLEE)
1298 ip = chain->ips[i]; 1360 j = i;
1299 else 1361 else
1300 ip = chain->ips[chain->nr - i - 1]; 1362 j = chain->nr - i - 1;
1363
1364#ifdef HAVE_SKIP_CALLCHAIN_IDX
1365 if (j == skip_idx)
1366 continue;
1367#endif
1368 ip = chain->ips[j];
1301 1369
1302 if (ip >= PERF_CONTEXT_MAX) { 1370 if (ip >= PERF_CONTEXT_MAX) {
1303 switch (ip) { 1371 switch (ip) {
@@ -1420,3 +1488,46 @@ int __machine__synthesize_threads(struct machine *machine, struct perf_tool *too
1420 /* command specified */ 1488 /* command specified */
1421 return 0; 1489 return 0;
1422} 1490}
1491
1492pid_t machine__get_current_tid(struct machine *machine, int cpu)
1493{
1494 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
1495 return -1;
1496
1497 return machine->current_tid[cpu];
1498}
1499
1500int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
1501 pid_t tid)
1502{
1503 struct thread *thread;
1504
1505 if (cpu < 0)
1506 return -EINVAL;
1507
1508 if (!machine->current_tid) {
1509 int i;
1510
1511 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
1512 if (!machine->current_tid)
1513 return -ENOMEM;
1514 for (i = 0; i < MAX_NR_CPUS; i++)
1515 machine->current_tid[i] = -1;
1516 }
1517
1518 if (cpu >= MAX_NR_CPUS) {
1519 pr_err("Requested CPU %d too large. ", cpu);
1520 pr_err("Consider raising MAX_NR_CPUS\n");
1521 return -EINVAL;
1522 }
1523
1524 machine->current_tid[cpu] = tid;
1525
1526 thread = machine__findnew_thread(machine, pid, tid);
1527 if (!thread)
1528 return -ENOMEM;
1529
1530 thread->cpu = cpu;
1531
1532 return 0;
1533}