author     Michael Ellerman <mpe@ellerman.id.au>              2014-06-10 08:23:10 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-06-11 03:03:58 -0400
commit     3752e453f6bafd78e5586cc2b2e33ee4b6e1566d (patch)
tree       2c5d88b6d9f759844249a36ce8b729dcb5c274ad /tools
parent     33b4819f3b93bbcb934e02cbc64ff3c5e9d0149b (diff)
selftests/powerpc: Add tests of PMU EBBs
The Power8 Performance Monitor Unit (PMU) has a new feature called Event
Based Branches (EBB). This commit adds tests of the kernel API for using
EBBs.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'tools')
34 files changed, 3913 insertions, 4 deletions
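For orientation, the kernel API the new tests exercise follows a common pattern: open a pinned, exclusive, per-task perf event with the EBB request bit set in the raw config, install a handler address in SPRN_EBBHR, program a sample period into the PMC, and enable EBB delivery via BESCR. The sketch below is not part of the commit; it is a minimal, hedged distillation of the perf_event_open() side as used by event_leader_ebb_init()/event_open() in the diff (raw event 0x1001e for cycles and the config bit 63 are taken from the test code). The handler itself cannot be plain C — it must save and restore all state and return with rfebb, which is what ebb_handler.S below provides.

```c
/* Sketch only: open a per-task EBB "cycles" event, mirroring what the
 * selftests do via event_init_named() + event_leader_ebb_init().
 * Values (0x1001e, config bit 63, pinned/exclusive leader) come from
 * the test code in this diff; error handling is minimal. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1001e;		/* cycles (PM_CYC) */
	attr.config |= 1ull << 63;	/* request EBB delivery for this event */
	attr.pinned = 1;		/* EBB events must be pinned ...        */
	attr.exclusive = 1;		/* ... exclusive group leaders          */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	attr.exclude_idle = 1;

	/* Per-task (pid 0 == self), any CPU: EBBs are only delivered to
	 * the task that owns the event. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* From here the tests write the handler address to SPRN_EBBHR,
	 * set BESCR[GE]/[PME], load the PMC with a sample period and
	 * clear MMCR0[FC]; see setup_ebb_handler(), ebb_global_enable()
	 * and ebb_handler.S in the diff below. */
	close(fd);
	return 0;
}
```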
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 7216f0091655..b9ff0db42c79 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -4,7 +4,7 @@ noarg:
4 | PROGS := count_instructions | 4 | PROGS := count_instructions |
5 | EXTRA_SOURCES := ../harness.c event.c | 5 | EXTRA_SOURCES := ../harness.c event.c |
6 | 6 | ||
7 | all: $(PROGS) | 7 | all: $(PROGS) sub_all |
8 | 8 | ||
9 | $(PROGS): $(EXTRA_SOURCES) | 9 | $(PROGS): $(EXTRA_SOURCES) |
10 | 10 | ||
@@ -12,12 +12,30 @@ $(PROGS): $(EXTRA_SOURCES)
12 | count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES) | 12 | count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES) |
13 | $(CC) $(CFLAGS) -m64 -o $@ $^ | 13 | $(CC) $(CFLAGS) -m64 -o $@ $^ |
14 | 14 | ||
15 | run_tests: all | 15 | run_tests: all sub_run_tests |
16 | @-for PROG in $(PROGS); do \ | 16 | @-for PROG in $(PROGS); do \ |
17 | ./$$PROG; \ | 17 | ./$$PROG; \ |
18 | done; | 18 | done; |
19 | 19 | ||
20 | clean: | 20 | clean: sub_clean |
21 | rm -f $(PROGS) loop.o | 21 | rm -f $(PROGS) loop.o |
22 | 22 | ||
23 | .PHONY: all run_tests clean | 23 | |
24 | SUB_TARGETS = ebb | ||
25 | |||
26 | sub_all: | ||
27 | @for TARGET in $(SUB_TARGETS); do \ | ||
28 | $(MAKE) -C $$TARGET all; \ | ||
29 | done; | ||
30 | |||
31 | sub_run_tests: all | ||
32 | @for TARGET in $(SUB_TARGETS); do \ | ||
33 | $(MAKE) -C $$TARGET run_tests; \ | ||
34 | done; | ||
35 | |||
36 | sub_clean: | ||
37 | @for TARGET in $(SUB_TARGETS); do \ | ||
38 | $(MAKE) -C $$TARGET clean; \ | ||
39 | done; | ||
40 | |||
41 | .PHONY: all run_tests clean sub_all sub_run_tests sub_clean | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
new file mode 100644
index 000000000000..edbba2affc2c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -0,0 +1,32 @@
1 | noarg: | ||
2 | $(MAKE) -C ../../ | ||
3 | |||
4 | # The EBB handler is 64-bit code and everything links against it | ||
5 | CFLAGS += -m64 | ||
6 | |||
7 | PROGS := reg_access_test event_attributes_test cycles_test \ | ||
8 | cycles_with_freeze_test pmc56_overflow_test \ | ||
9 | ebb_vs_cpu_event_test cpu_event_vs_ebb_test \ | ||
10 | cpu_event_pinned_vs_ebb_test task_event_vs_ebb_test \ | ||
11 | task_event_pinned_vs_ebb_test multi_ebb_procs_test \ | ||
12 | multi_counter_test pmae_handling_test \ | ||
13 | close_clears_pmcc_test instruction_count_test \ | ||
14 | fork_cleanup_test ebb_on_child_test \ | ||
15 | ebb_on_willing_child_test back_to_back_ebbs_test \ | ||
16 | lost_exception_test no_handler_test | ||
17 | |||
18 | all: $(PROGS) | ||
19 | |||
20 | $(PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c | ||
21 | |||
22 | instruction_count_test: ../loop.S | ||
23 | |||
24 | lost_exception_test: ../lib.c | ||
25 | |||
26 | run_tests: all | ||
27 | @-for PROG in $(PROGS); do \ | ||
28 | ./$$PROG; \ | ||
29 | done; | ||
30 | |||
31 | clean: | ||
32 | rm -f $(PROGS) | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
new file mode 100644
index 000000000000..66ea765c0e72
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
@@ -0,0 +1,106 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdbool.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | |||
10 | #include "ebb.h" | ||
11 | |||
12 | |||
13 | #define NUMBER_OF_EBBS 50 | ||
14 | |||
15 | /* | ||
16 | * Test that if we overflow the counter while in the EBB handler, we take | ||
17 | * another EBB on exiting from the handler. | ||
18 | * | ||
19 | * We do this by counting with a stupidly low sample period, causing us to | ||
20 | * overflow the PMU while we're still in the EBB handler, leading to another | ||
21 | * EBB. | ||
22 | * | ||
23 | * We get out of what would otherwise be an infinite loop by leaving the | ||
24 | * counter frozen once we've taken enough EBBs. | ||
25 | */ | ||
26 | |||
27 | static void ebb_callee(void) | ||
28 | { | ||
29 | uint64_t siar, val; | ||
30 | |||
31 | val = mfspr(SPRN_BESCR); | ||
32 | if (!(val & BESCR_PMEO)) { | ||
33 | ebb_state.stats.spurious++; | ||
34 | goto out; | ||
35 | } | ||
36 | |||
37 | ebb_state.stats.ebb_count++; | ||
38 | trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count); | ||
39 | |||
40 | /* Resets the PMC */ | ||
41 | count_pmc(1, sample_period); | ||
42 | |||
43 | out: | ||
44 | if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS) | ||
45 | /* Reset but leave counters frozen */ | ||
46 | reset_ebb_with_clear_mask(MMCR0_PMAO); | ||
47 | else | ||
48 | /* Unfreezes */ | ||
49 | reset_ebb(); | ||
50 | |||
51 | /* Do some stuff to chew some cycles and pop the counter */ | ||
52 | siar = mfspr(SPRN_SIAR); | ||
53 | trace_log_reg(ebb_state.trace, SPRN_SIAR, siar); | ||
54 | |||
55 | val = mfspr(SPRN_PMC1); | ||
56 | trace_log_reg(ebb_state.trace, SPRN_PMC1, val); | ||
57 | |||
58 | val = mfspr(SPRN_MMCR0); | ||
59 | trace_log_reg(ebb_state.trace, SPRN_MMCR0, val); | ||
60 | } | ||
61 | |||
62 | int back_to_back_ebbs(void) | ||
63 | { | ||
64 | struct event event; | ||
65 | |||
66 | event_init_named(&event, 0x1001e, "cycles"); | ||
67 | event_leader_ebb_init(&event); | ||
68 | |||
69 | event.attr.exclude_kernel = 1; | ||
70 | event.attr.exclude_hv = 1; | ||
71 | event.attr.exclude_idle = 1; | ||
72 | |||
73 | FAIL_IF(event_open(&event)); | ||
74 | |||
75 | setup_ebb_handler(ebb_callee); | ||
76 | |||
77 | FAIL_IF(ebb_event_enable(&event)); | ||
78 | |||
79 | sample_period = 5; | ||
80 | |||
81 | ebb_freeze_pmcs(); | ||
82 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
83 | ebb_global_enable(); | ||
84 | ebb_unfreeze_pmcs(); | ||
85 | |||
86 | while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS) | ||
87 | FAIL_IF(core_busy_loop()); | ||
88 | |||
89 | ebb_global_disable(); | ||
90 | ebb_freeze_pmcs(); | ||
91 | |||
92 | count_pmc(1, sample_period); | ||
93 | |||
94 | dump_ebb_state(); | ||
95 | |||
96 | event_close(&event); | ||
97 | |||
98 | FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS); | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | int main(void) | ||
104 | { | ||
105 | return test_harness(back_to_back_ebbs, "back_to_back_ebbs"); | ||
106 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c b/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c
new file mode 100644
index 000000000000..0f0423dba18b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c
@@ -0,0 +1,59 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | #include <setjmp.h> | ||
9 | #include <signal.h> | ||
10 | |||
11 | #include "ebb.h" | ||
12 | |||
13 | |||
14 | /* | ||
15 | * Test that closing the EBB event clears MMCR0_PMCC, preventing further access | ||
16 | * by userspace to the PMU hardware. | ||
17 | */ | ||
18 | |||
19 | int close_clears_pmcc(void) | ||
20 | { | ||
21 | struct event event; | ||
22 | |||
23 | event_init_named(&event, 0x1001e, "cycles"); | ||
24 | event_leader_ebb_init(&event); | ||
25 | |||
26 | FAIL_IF(event_open(&event)); | ||
27 | |||
28 | ebb_enable_pmc_counting(1); | ||
29 | setup_ebb_handler(standard_ebb_callee); | ||
30 | ebb_global_enable(); | ||
31 | FAIL_IF(ebb_event_enable(&event)); | ||
32 | |||
33 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
34 | |||
35 | while (ebb_state.stats.ebb_count < 1) | ||
36 | FAIL_IF(core_busy_loop()); | ||
37 | |||
38 | ebb_global_disable(); | ||
39 | event_close(&event); | ||
40 | |||
41 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
42 | |||
43 | /* The real test is here: do we take a SIGILL when writing PMU regs now | ||
44 | * that we have closed the event? We expect that we will. */ | ||
45 | |||
46 | FAIL_IF(catch_sigill(write_pmc1)); | ||
47 | |||
48 | /* We should still be able to read EBB regs though */ | ||
49 | mfspr(SPRN_EBBHR); | ||
50 | mfspr(SPRN_EBBRR); | ||
51 | mfspr(SPRN_BESCR); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | int main(void) | ||
57 | { | ||
58 | return test_harness(close_clears_pmcc, "close_clears_pmcc"); | ||
59 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
new file mode 100644
index 000000000000..d3ed64d5d6c0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
@@ -0,0 +1,93 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event | ||
19 | * should remain and the EBB event should fail to enable. | ||
20 | */ | ||
21 | |||
22 | static int setup_cpu_event(struct event *event, int cpu) | ||
23 | { | ||
24 | event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); | ||
25 | |||
26 | event->attr.pinned = 1; | ||
27 | |||
28 | event->attr.exclude_kernel = 1; | ||
29 | event->attr.exclude_hv = 1; | ||
30 | event->attr.exclude_idle = 1; | ||
31 | |||
32 | SKIP_IF(require_paranoia_below(1)); | ||
33 | FAIL_IF(event_open_with_cpu(event, cpu)); | ||
34 | FAIL_IF(event_enable(event)); | ||
35 | |||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | int cpu_event_pinned_vs_ebb(void) | ||
40 | { | ||
41 | union pipe read_pipe, write_pipe; | ||
42 | struct event event; | ||
43 | int cpu, rc; | ||
44 | pid_t pid; | ||
45 | |||
46 | cpu = pick_online_cpu(); | ||
47 | FAIL_IF(cpu < 0); | ||
48 | FAIL_IF(bind_to_cpu(cpu)); | ||
49 | |||
50 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
51 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
52 | |||
53 | pid = fork(); | ||
54 | if (pid == 0) { | ||
55 | /* NB order of pipes looks reversed */ | ||
56 | exit(ebb_child(write_pipe, read_pipe)); | ||
57 | } | ||
58 | |||
59 | /* We setup the cpu event first */ | ||
60 | rc = setup_cpu_event(&event, cpu); | ||
61 | if (rc) { | ||
62 | kill_child_and_wait(pid); | ||
63 | return rc; | ||
64 | } | ||
65 | |||
66 | /* Signal the child to install its EBB event and wait */ | ||
67 | if (sync_with_child(read_pipe, write_pipe)) | ||
68 | /* If it fails, wait for it to exit */ | ||
69 | goto wait; | ||
70 | |||
71 | /* Signal the child to run */ | ||
72 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
73 | |||
74 | wait: | ||
75 | /* We expect it to fail to read the event */ | ||
76 | FAIL_IF(wait_for_child(pid) != 2); | ||
77 | |||
78 | FAIL_IF(event_disable(&event)); | ||
79 | FAIL_IF(event_read(&event)); | ||
80 | |||
81 | event_report(&event); | ||
82 | |||
83 | /* The cpu event should have run */ | ||
84 | FAIL_IF(event.result.value == 0); | ||
85 | FAIL_IF(event.result.enabled != event.result.running); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | int main(void) | ||
91 | { | ||
92 | return test_harness(cpu_event_pinned_vs_ebb, "cpu_event_pinned_vs_ebb"); | ||
93 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
new file mode 100644
index 000000000000..8b972c2aa392
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
@@ -0,0 +1,89 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests a cpu event vs an EBB - in that order. The EBB should force the cpu | ||
19 | * event off the PMU. | ||
20 | */ | ||
21 | |||
22 | static int setup_cpu_event(struct event *event, int cpu) | ||
23 | { | ||
24 | event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); | ||
25 | |||
26 | event->attr.exclude_kernel = 1; | ||
27 | event->attr.exclude_hv = 1; | ||
28 | event->attr.exclude_idle = 1; | ||
29 | |||
30 | SKIP_IF(require_paranoia_below(1)); | ||
31 | FAIL_IF(event_open_with_cpu(event, cpu)); | ||
32 | FAIL_IF(event_enable(event)); | ||
33 | |||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | int cpu_event_vs_ebb(void) | ||
38 | { | ||
39 | union pipe read_pipe, write_pipe; | ||
40 | struct event event; | ||
41 | int cpu, rc; | ||
42 | pid_t pid; | ||
43 | |||
44 | cpu = pick_online_cpu(); | ||
45 | FAIL_IF(cpu < 0); | ||
46 | FAIL_IF(bind_to_cpu(cpu)); | ||
47 | |||
48 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
49 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
50 | |||
51 | pid = fork(); | ||
52 | if (pid == 0) { | ||
53 | /* NB order of pipes looks reversed */ | ||
54 | exit(ebb_child(write_pipe, read_pipe)); | ||
55 | } | ||
56 | |||
57 | /* We setup the cpu event first */ | ||
58 | rc = setup_cpu_event(&event, cpu); | ||
59 | if (rc) { | ||
60 | kill_child_and_wait(pid); | ||
61 | return rc; | ||
62 | } | ||
63 | |||
64 | /* Signal the child to install its EBB event and wait */ | ||
65 | if (sync_with_child(read_pipe, write_pipe)) | ||
66 | /* If it fails, wait for it to exit */ | ||
67 | goto wait; | ||
68 | |||
69 | /* Signal the child to run */ | ||
70 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
71 | |||
72 | wait: | ||
73 | /* We expect the child to succeed */ | ||
74 | FAIL_IF(wait_for_child(pid)); | ||
75 | |||
76 | FAIL_IF(event_disable(&event)); | ||
77 | FAIL_IF(event_read(&event)); | ||
78 | |||
79 | event_report(&event); | ||
80 | |||
81 | /* The cpu event may have run */ | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | int main(void) | ||
87 | { | ||
88 | return test_harness(cpu_event_vs_ebb, "cpu_event_vs_ebb"); | ||
89 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
new file mode 100644
index 000000000000..8590fc1bfc0d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
@@ -0,0 +1,58 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | |||
9 | #include "ebb.h" | ||
10 | |||
11 | |||
12 | /* | ||
13 | * Basic test that counts user cycles and takes EBBs. | ||
14 | */ | ||
15 | int cycles(void) | ||
16 | { | ||
17 | struct event event; | ||
18 | |||
19 | event_init_named(&event, 0x1001e, "cycles"); | ||
20 | event_leader_ebb_init(&event); | ||
21 | |||
22 | event.attr.exclude_kernel = 1; | ||
23 | event.attr.exclude_hv = 1; | ||
24 | event.attr.exclude_idle = 1; | ||
25 | |||
26 | FAIL_IF(event_open(&event)); | ||
27 | |||
28 | ebb_enable_pmc_counting(1); | ||
29 | setup_ebb_handler(standard_ebb_callee); | ||
30 | ebb_global_enable(); | ||
31 | FAIL_IF(ebb_event_enable(&event)); | ||
32 | |||
33 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
34 | |||
35 | while (ebb_state.stats.ebb_count < 10) { | ||
36 | FAIL_IF(core_busy_loop()); | ||
37 | FAIL_IF(ebb_check_mmcr0()); | ||
38 | } | ||
39 | |||
40 | ebb_global_disable(); | ||
41 | ebb_freeze_pmcs(); | ||
42 | |||
43 | count_pmc(1, sample_period); | ||
44 | |||
45 | dump_ebb_state(); | ||
46 | |||
47 | event_close(&event); | ||
48 | |||
49 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
50 | FAIL_IF(!ebb_check_count(1, sample_period, 100)); | ||
51 | |||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | int main(void) | ||
56 | { | ||
57 | return test_harness(cycles, "cycles"); | ||
58 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
new file mode 100644
index 000000000000..754b3f2008d3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
@@ -0,0 +1,117 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | #include <stdbool.h> | ||
9 | |||
10 | #include "ebb.h" | ||
11 | |||
12 | |||
13 | /* | ||
14 | * Test of counting cycles while using MMCR0_FC (freeze counters) to only count | ||
15 | * parts of the code. This is complicated by the fact that FC is set by the | ||
16 | * hardware when the event overflows. We may take the EBB after we have set FC, | ||
17 | * so we have to be careful about whether we clear FC at the end of the EBB | ||
18 | * handler or not. | ||
19 | */ | ||
20 | |||
21 | static bool counters_frozen = false; | ||
22 | static int ebbs_while_frozen = 0; | ||
23 | |||
24 | static void ebb_callee(void) | ||
25 | { | ||
26 | uint64_t mask, val; | ||
27 | |||
28 | mask = MMCR0_PMAO | MMCR0_FC; | ||
29 | |||
30 | val = mfspr(SPRN_BESCR); | ||
31 | if (!(val & BESCR_PMEO)) { | ||
32 | ebb_state.stats.spurious++; | ||
33 | goto out; | ||
34 | } | ||
35 | |||
36 | ebb_state.stats.ebb_count++; | ||
37 | trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count); | ||
38 | |||
39 | val = mfspr(SPRN_MMCR0); | ||
40 | trace_log_reg(ebb_state.trace, SPRN_MMCR0, val); | ||
41 | |||
42 | if (counters_frozen) { | ||
43 | trace_log_string(ebb_state.trace, "frozen"); | ||
44 | ebbs_while_frozen++; | ||
45 | mask &= ~MMCR0_FC; | ||
46 | } | ||
47 | |||
48 | count_pmc(1, sample_period); | ||
49 | out: | ||
50 | reset_ebb_with_clear_mask(mask); | ||
51 | } | ||
52 | |||
53 | int cycles_with_freeze(void) | ||
54 | { | ||
55 | struct event event; | ||
56 | uint64_t val; | ||
57 | bool fc_cleared; | ||
58 | |||
59 | event_init_named(&event, 0x1001e, "cycles"); | ||
60 | event_leader_ebb_init(&event); | ||
61 | |||
62 | event.attr.exclude_kernel = 1; | ||
63 | event.attr.exclude_hv = 1; | ||
64 | event.attr.exclude_idle = 1; | ||
65 | |||
66 | FAIL_IF(event_open(&event)); | ||
67 | |||
68 | setup_ebb_handler(ebb_callee); | ||
69 | ebb_global_enable(); | ||
70 | FAIL_IF(ebb_event_enable(&event)); | ||
71 | |||
72 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
73 | |||
74 | fc_cleared = false; | ||
75 | |||
76 | /* Make sure we loop until we take at least one EBB */ | ||
77 | while ((ebb_state.stats.ebb_count < 20 && !fc_cleared) || | ||
78 | ebb_state.stats.ebb_count < 1) | ||
79 | { | ||
80 | counters_frozen = false; | ||
81 | mb(); | ||
82 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); | ||
83 | |||
84 | FAIL_IF(core_busy_loop()); | ||
85 | |||
86 | counters_frozen = true; | ||
87 | mb(); | ||
88 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); | ||
89 | |||
90 | val = mfspr(SPRN_MMCR0); | ||
91 | if (! (val & MMCR0_FC)) { | ||
92 | printf("Outside of loop, FC NOT set MMCR0 0x%lx\n", val); | ||
93 | fc_cleared = true; | ||
94 | } | ||
95 | } | ||
96 | |||
97 | ebb_global_disable(); | ||
98 | ebb_freeze_pmcs(); | ||
99 | |||
100 | count_pmc(1, sample_period); | ||
101 | |||
102 | dump_ebb_state(); | ||
103 | |||
104 | printf("EBBs while frozen %d\n", ebbs_while_frozen); | ||
105 | |||
106 | event_close(&event); | ||
107 | |||
108 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
109 | FAIL_IF(fc_cleared); | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | int main(void) | ||
115 | { | ||
116 | return test_harness(cycles_with_freeze, "cycles_with_freeze"); | ||
117 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
new file mode 100644
index 000000000000..1b46be94b64c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
@@ -0,0 +1,727 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #define _GNU_SOURCE /* For CPU_ZERO etc. */ | ||
7 | |||
8 | #include <sched.h> | ||
9 | #include <sys/wait.h> | ||
10 | #include <setjmp.h> | ||
11 | #include <signal.h> | ||
12 | #include <stdio.h> | ||
13 | #include <stdlib.h> | ||
14 | #include <string.h> | ||
15 | #include <sys/ioctl.h> | ||
16 | |||
17 | #include "trace.h" | ||
18 | #include "reg.h" | ||
19 | #include "ebb.h" | ||
20 | |||
21 | |||
22 | void (*ebb_user_func)(void); | ||
23 | |||
24 | void ebb_hook(void) | ||
25 | { | ||
26 | if (ebb_user_func) | ||
27 | ebb_user_func(); | ||
28 | } | ||
29 | |||
30 | struct ebb_state ebb_state; | ||
31 | |||
32 | u64 sample_period = 0x40000000ull; | ||
33 | |||
34 | void reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask) | ||
35 | { | ||
36 | u64 val; | ||
37 | |||
38 | /* 2) clear MMCR0[PMAO] - docs say BESCR[PMEO] should do this */ | ||
39 | /* 3) set MMCR0[PMAE] - docs say BESCR[PME] should do this */ | ||
40 | val = mfspr(SPRN_MMCR0); | ||
41 | mtspr(SPRN_MMCR0, (val & ~mmcr0_clear_mask) | MMCR0_PMAE); | ||
42 | |||
43 | /* 4) clear BESCR[PMEO] */ | ||
44 | mtspr(SPRN_BESCRR, BESCR_PMEO); | ||
45 | |||
46 | /* 5) set BESCR[PME] */ | ||
47 | mtspr(SPRN_BESCRS, BESCR_PME); | ||
48 | |||
49 | /* 6) rfebb 1 - done in our caller */ | ||
50 | } | ||
51 | |||
52 | void reset_ebb(void) | ||
53 | { | ||
54 | reset_ebb_with_clear_mask(MMCR0_PMAO | MMCR0_FC); | ||
55 | } | ||
56 | |||
57 | /* Called outside of the EBB handler to check MMCR0 is sane */ | ||
58 | int ebb_check_mmcr0(void) | ||
59 | { | ||
60 | u64 val; | ||
61 | |||
62 | val = mfspr(SPRN_MMCR0); | ||
63 | if ((val & (MMCR0_FC | MMCR0_PMAO)) == MMCR0_FC) { | ||
64 | /* It's OK if we see FC & PMAO, but not FC by itself */ | ||
65 | printf("Outside of loop, only FC set 0x%llx\n", val); | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | bool ebb_check_count(int pmc, u64 sample_period, int fudge) | ||
73 | { | ||
74 | u64 count, upper, lower; | ||
75 | |||
76 | count = ebb_state.stats.pmc_count[PMC_INDEX(pmc)]; | ||
77 | |||
78 | lower = ebb_state.stats.ebb_count * (sample_period - fudge); | ||
79 | |||
80 | if (count < lower) { | ||
81 | printf("PMC%d count (0x%llx) below lower limit 0x%llx (-0x%llx)\n", | ||
82 | pmc, count, lower, lower - count); | ||
83 | return false; | ||
84 | } | ||
85 | |||
86 | upper = ebb_state.stats.ebb_count * (sample_period + fudge); | ||
87 | |||
88 | if (count > upper) { | ||
89 | printf("PMC%d count (0x%llx) above upper limit 0x%llx (+0x%llx)\n", | ||
90 | pmc, count, upper, count - upper); | ||
91 | return false; | ||
92 | } | ||
93 | |||
94 | printf("PMC%d count (0x%llx) is between 0x%llx and 0x%llx delta +0x%llx/-0x%llx\n", | ||
95 | pmc, count, lower, upper, count - lower, upper - count); | ||
96 | |||
97 | return true; | ||
98 | } | ||
99 | |||
100 | void standard_ebb_callee(void) | ||
101 | { | ||
102 | int found, i; | ||
103 | u64 val; | ||
104 | |||
105 | val = mfspr(SPRN_BESCR); | ||
106 | if (!(val & BESCR_PMEO)) { | ||
107 | ebb_state.stats.spurious++; | ||
108 | goto out; | ||
109 | } | ||
110 | |||
111 | ebb_state.stats.ebb_count++; | ||
112 | trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count); | ||
113 | |||
114 | val = mfspr(SPRN_MMCR0); | ||
115 | trace_log_reg(ebb_state.trace, SPRN_MMCR0, val); | ||
116 | |||
117 | found = 0; | ||
118 | for (i = 1; i <= 6; i++) { | ||
119 | if (ebb_state.pmc_enable[PMC_INDEX(i)]) | ||
120 | found += count_pmc(i, sample_period); | ||
121 | } | ||
122 | |||
123 | if (!found) | ||
124 | ebb_state.stats.no_overflow++; | ||
125 | |||
126 | out: | ||
127 | reset_ebb(); | ||
128 | } | ||
129 | |||
130 | extern void ebb_handler(void); | ||
131 | |||
132 | void setup_ebb_handler(void (*callee)(void)) | ||
133 | { | ||
134 | u64 entry; | ||
135 | |||
136 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
137 | entry = (u64)ebb_handler; | ||
138 | #else | ||
139 | struct opd | ||
140 | { | ||
141 | u64 entry; | ||
142 | u64 toc; | ||
143 | } *opd; | ||
144 | |||
145 | opd = (struct opd *)ebb_handler; | ||
146 | entry = opd->entry; | ||
147 | #endif | ||
148 | printf("EBB Handler is at %#llx\n", entry); | ||
149 | |||
150 | ebb_user_func = callee; | ||
151 | |||
152 | /* Ensure ebb_user_func is set before we set the handler */ | ||
153 | mb(); | ||
154 | mtspr(SPRN_EBBHR, entry); | ||
155 | |||
156 | /* Make sure the handler is set before we return */ | ||
157 | mb(); | ||
158 | } | ||
159 | |||
160 | void clear_ebb_stats(void) | ||
161 | { | ||
162 | memset(&ebb_state.stats, 0, sizeof(ebb_state.stats)); | ||
163 | } | ||
164 | |||
165 | void dump_summary_ebb_state(void) | ||
166 | { | ||
167 | printf("ebb_state:\n" \ | ||
168 | " ebb_count = %d\n" \ | ||
169 | " spurious = %d\n" \ | ||
170 | " negative = %d\n" \ | ||
171 | " no_overflow = %d\n" \ | ||
172 | " pmc[1] count = 0x%llx\n" \ | ||
173 | " pmc[2] count = 0x%llx\n" \ | ||
174 | " pmc[3] count = 0x%llx\n" \ | ||
175 | " pmc[4] count = 0x%llx\n" \ | ||
176 | " pmc[5] count = 0x%llx\n" \ | ||
177 | " pmc[6] count = 0x%llx\n", | ||
178 | ebb_state.stats.ebb_count, ebb_state.stats.spurious, | ||
179 | ebb_state.stats.negative, ebb_state.stats.no_overflow, | ||
180 | ebb_state.stats.pmc_count[0], ebb_state.stats.pmc_count[1], | ||
181 | ebb_state.stats.pmc_count[2], ebb_state.stats.pmc_count[3], | ||
182 | ebb_state.stats.pmc_count[4], ebb_state.stats.pmc_count[5]); | ||
183 | } | ||
184 | |||
185 | static char *decode_mmcr0(u32 value) | ||
186 | { | ||
187 | static char buf[16]; | ||
188 | |||
189 | buf[0] = '\0'; | ||
190 | |||
191 | if (value & (1 << 31)) | ||
192 | strcat(buf, "FC "); | ||
193 | if (value & (1 << 26)) | ||
194 | strcat(buf, "PMAE "); | ||
195 | if (value & (1 << 7)) | ||
196 | strcat(buf, "PMAO "); | ||
197 | |||
198 | return buf; | ||
199 | } | ||
200 | |||
201 | static char *decode_bescr(u64 value) | ||
202 | { | ||
203 | static char buf[16]; | ||
204 | |||
205 | buf[0] = '\0'; | ||
206 | |||
207 | if (value & (1ull << 63)) | ||
208 | strcat(buf, "GE "); | ||
209 | if (value & (1ull << 32)) | ||
210 | strcat(buf, "PMAE "); | ||
211 | if (value & 1) | ||
212 | strcat(buf, "PMAO "); | ||
213 | |||
214 | return buf; | ||
215 | } | ||
216 | |||
217 | void dump_ebb_hw_state(void) | ||
218 | { | ||
219 | u64 bescr; | ||
220 | u32 mmcr0; | ||
221 | |||
222 | mmcr0 = mfspr(SPRN_MMCR0); | ||
223 | bescr = mfspr(SPRN_BESCR); | ||
224 | |||
225 | printf("HW state:\n" \ | ||
226 | "MMCR0 0x%016x %s\n" \ | ||
227 | "EBBHR 0x%016lx\n" \ | ||
228 | "BESCR 0x%016llx %s\n" \ | ||
229 | "PMC1 0x%016lx\n" \ | ||
230 | "PMC2 0x%016lx\n" \ | ||
231 | "PMC3 0x%016lx\n" \ | ||
232 | "PMC4 0x%016lx\n" \ | ||
233 | "PMC5 0x%016lx\n" \ | ||
234 | "PMC6 0x%016lx\n" \ | ||
235 | "SIAR 0x%016lx\n", | ||
236 | mmcr0, decode_mmcr0(mmcr0), mfspr(SPRN_EBBHR), bescr, | ||
237 | decode_bescr(bescr), mfspr(SPRN_PMC1), mfspr(SPRN_PMC2), | ||
238 | mfspr(SPRN_PMC3), mfspr(SPRN_PMC4), mfspr(SPRN_PMC5), | ||
239 | mfspr(SPRN_PMC6), mfspr(SPRN_SIAR)); | ||
240 | } | ||
241 | |||
242 | void dump_ebb_state(void) | ||
243 | { | ||
244 | dump_summary_ebb_state(); | ||
245 | |||
246 | dump_ebb_hw_state(); | ||
247 | |||
248 | trace_buffer_print(ebb_state.trace); | ||
249 | } | ||
250 | |||
251 | int count_pmc(int pmc, uint32_t sample_period) | ||
252 | { | ||
253 | uint32_t start_value; | ||
254 | u64 val; | ||
255 | |||
256 | /* 0) Read PMC */ | ||
257 | start_value = pmc_sample_period(sample_period); | ||
258 | |||
259 | val = read_pmc(pmc); | ||
260 | if (val < start_value) | ||
261 | ebb_state.stats.negative++; | ||
262 | else | ||
263 | ebb_state.stats.pmc_count[PMC_INDEX(pmc)] += val - start_value; | ||
264 | |||
265 | trace_log_reg(ebb_state.trace, SPRN_PMC1 + pmc - 1, val); | ||
266 | |||
267 | /* 1) Reset PMC */ | ||
268 | write_pmc(pmc, start_value); | ||
269 | |||
270 | /* Report if we overflowed */ | ||
271 | return val >= COUNTER_OVERFLOW; | ||
272 | } | ||
273 | |||
274 | int ebb_event_enable(struct event *e) | ||
275 | { | ||
276 | int rc; | ||
277 | |||
278 | /* Ensure any SPR writes are ordered vs us */ | ||
279 | mb(); | ||
280 | |||
281 | rc = ioctl(e->fd, PERF_EVENT_IOC_ENABLE); | ||
282 | if (rc) | ||
283 | return rc; | ||
284 | |||
285 | rc = event_read(e); | ||
286 | |||
287 | /* Ditto */ | ||
288 | mb(); | ||
289 | |||
290 | return rc; | ||
291 | } | ||
292 | |||
293 | void ebb_freeze_pmcs(void) | ||
294 | { | ||
295 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); | ||
296 | mb(); | ||
297 | } | ||
298 | |||
299 | void ebb_unfreeze_pmcs(void) | ||
300 | { | ||
301 | /* Unfreeze counters */ | ||
302 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); | ||
303 | mb(); | ||
304 | } | ||
305 | |||
306 | void ebb_global_enable(void) | ||
307 | { | ||
308 | /* Enable EBBs globally and PMU EBBs */ | ||
309 | mtspr(SPRN_BESCR, 0x8000000100000000ull); | ||
310 | mb(); | ||
311 | } | ||
312 | |||
313 | void ebb_global_disable(void) | ||
314 | { | ||
315 | /* Disable EBBs & freeze counters, events are still scheduled */ | ||
316 | mtspr(SPRN_BESCRR, BESCR_PME); | ||
317 | mb(); | ||
318 | } | ||
319 | |||
320 | void event_ebb_init(struct event *e) | ||
321 | { | ||
322 | e->attr.config |= (1ull << 63); | ||
323 | } | ||
324 | |||
325 | void event_bhrb_init(struct event *e, unsigned ifm) | ||
326 | { | ||
327 | e->attr.config |= (1ull << 62) | ((u64)ifm << 60); | ||
328 | } | ||
329 | |||
330 | void event_leader_ebb_init(struct event *e) | ||
331 | { | ||
332 | event_ebb_init(e); | ||
333 | |||
334 | e->attr.exclusive = 1; | ||
335 | e->attr.pinned = 1; | ||
336 | } | ||
337 | |||
338 | int core_busy_loop(void) | ||
339 | { | ||
340 | int rc; | ||
341 | |||
342 | asm volatile ( | ||
343 | "li 3, 0x3030\n" | ||
344 | "std 3, -96(1)\n" | ||
345 | "li 4, 0x4040\n" | ||
346 | "std 4, -104(1)\n" | ||
347 | "li 5, 0x5050\n" | ||
348 | "std 5, -112(1)\n" | ||
349 | "li 6, 0x6060\n" | ||
350 | "std 6, -120(1)\n" | ||
351 | "li 7, 0x7070\n" | ||
352 | "std 7, -128(1)\n" | ||
353 | "li 8, 0x0808\n" | ||
354 | "std 8, -136(1)\n" | ||
355 | "li 9, 0x0909\n" | ||
356 | "std 9, -144(1)\n" | ||
357 | "li 10, 0x1010\n" | ||
358 | "std 10, -152(1)\n" | ||
359 | "li 11, 0x1111\n" | ||
360 | "std 11, -160(1)\n" | ||
361 | "li 14, 0x1414\n" | ||
362 | "std 14, -168(1)\n" | ||
363 | "li 15, 0x1515\n" | ||
364 | "std 15, -176(1)\n" | ||
365 | "li 16, 0x1616\n" | ||
366 | "std 16, -184(1)\n" | ||
367 | "li 17, 0x1717\n" | ||
368 | "std 17, -192(1)\n" | ||
369 | "li 18, 0x1818\n" | ||
370 | "std 18, -200(1)\n" | ||
371 | "li 19, 0x1919\n" | ||
372 | "std 19, -208(1)\n" | ||
373 | "li 20, 0x2020\n" | ||
374 | "std 20, -216(1)\n" | ||
375 | "li 21, 0x2121\n" | ||
376 | "std 21, -224(1)\n" | ||
377 | "li 22, 0x2222\n" | ||
378 | "std 22, -232(1)\n" | ||
379 | "li 23, 0x2323\n" | ||
380 | "std 23, -240(1)\n" | ||
381 | "li 24, 0x2424\n" | ||
382 | "std 24, -248(1)\n" | ||
383 | "li 25, 0x2525\n" | ||
384 | "std 25, -256(1)\n" | ||
385 | "li 26, 0x2626\n" | ||
386 | "std 26, -264(1)\n" | ||
387 | "li 27, 0x2727\n" | ||
388 | "std 27, -272(1)\n" | ||
389 | "li 28, 0x2828\n" | ||
390 | "std 28, -280(1)\n" | ||
391 | "li 29, 0x2929\n" | ||
392 | "std 29, -288(1)\n" | ||
393 | "li 30, 0x3030\n" | ||
394 | "li 31, 0x3131\n" | ||
395 | |||
396 | "li 3, 0\n" | ||
397 | "0: " | ||
398 | "addi 3, 3, 1\n" | ||
399 | "cmpwi 3, 100\n" | ||
400 | "blt 0b\n" | ||
401 | |||
402 | /* Return 1 (fail) unless we get through all the checks */ | ||
403 | "li 0, 1\n" | ||
404 | |||
405 | /* Check none of our registers have been corrupted */ | ||
406 | "cmpwi 4, 0x4040\n" | ||
407 | "bne 1f\n" | ||
408 | "cmpwi 5, 0x5050\n" | ||
409 | "bne 1f\n" | ||
410 | "cmpwi 6, 0x6060\n" | ||
411 | "bne 1f\n" | ||
412 | "cmpwi 7, 0x7070\n" | ||
413 | "bne 1f\n" | ||
414 | "cmpwi 8, 0x0808\n" | ||
415 | "bne 1f\n" | ||
416 | "cmpwi 9, 0x0909\n" | ||
417 | "bne 1f\n" | ||
418 | "cmpwi 10, 0x1010\n" | ||
419 | "bne 1f\n" | ||
420 | "cmpwi 11, 0x1111\n" | ||
421 | "bne 1f\n" | ||
422 | "cmpwi 14, 0x1414\n" | ||
423 | "bne 1f\n" | ||
424 | "cmpwi 15, 0x1515\n" | ||
425 | "bne 1f\n" | ||
426 | "cmpwi 16, 0x1616\n" | ||
427 | "bne 1f\n" | ||
428 | "cmpwi 17, 0x1717\n" | ||
429 | "bne 1f\n" | ||
430 | "cmpwi 18, 0x1818\n" | ||
431 | "bne 1f\n" | ||
432 | "cmpwi 19, 0x1919\n" | ||
433 | "bne 1f\n" | ||
434 | "cmpwi 20, 0x2020\n" | ||
435 | "bne 1f\n" | ||
436 | "cmpwi 21, 0x2121\n" | ||
437 | "bne 1f\n" | ||
438 | "cmpwi 22, 0x2222\n" | ||
439 | "bne 1f\n" | ||
440 | "cmpwi 23, 0x2323\n" | ||
441 | "bne 1f\n" | ||
442 | "cmpwi 24, 0x2424\n" | ||
443 | "bne 1f\n" | ||
444 | "cmpwi 25, 0x2525\n" | ||
445 | "bne 1f\n" | ||
446 | "cmpwi 26, 0x2626\n" | ||
447 | "bne 1f\n" | ||
448 | "cmpwi 27, 0x2727\n" | ||
449 | "bne 1f\n" | ||
450 | "cmpwi 28, 0x2828\n" | ||
451 | "bne 1f\n" | ||
452 | "cmpwi 29, 0x2929\n" | ||
453 | "bne 1f\n" | ||
454 | "cmpwi 30, 0x3030\n" | ||
455 | "bne 1f\n" | ||
456 | "cmpwi 31, 0x3131\n" | ||
457 | "bne 1f\n" | ||
458 | |||
459 | /* Load junk into all our registers before we reload them from the stack. */ | ||
460 | "li 3, 0xde\n" | ||
461 | "li 4, 0xad\n" | ||
462 | "li 5, 0xbe\n" | ||
463 | "li 6, 0xef\n" | ||
464 | "li 7, 0xde\n" | ||
465 | "li 8, 0xad\n" | ||
466 | "li 9, 0xbe\n" | ||
467 | "li 10, 0xef\n" | ||
468 | "li 11, 0xde\n" | ||
469 | "li 14, 0xad\n" | ||
470 | "li 15, 0xbe\n" | ||
471 | "li 16, 0xef\n" | ||
472 | "li 17, 0xde\n" | ||
473 | "li 18, 0xad\n" | ||
474 | "li 19, 0xbe\n" | ||
475 | "li 20, 0xef\n" | ||
476 | "li 21, 0xde\n" | ||
477 | "li 22, 0xad\n" | ||
478 | "li 23, 0xbe\n" | ||
479 | "li 24, 0xef\n" | ||
480 | "li 25, 0xde\n" | ||
481 | "li 26, 0xad\n" | ||
482 | "li 27, 0xbe\n" | ||
483 | "li 28, 0xef\n" | ||
484 | "li 29, 0xdd\n" | ||
485 | |||
486 | "ld 3, -96(1)\n" | ||
487 | "cmpwi 3, 0x3030\n" | ||
488 | "bne 1f\n" | ||
489 | "ld 4, -104(1)\n" | ||
490 | "cmpwi 4, 0x4040\n" | ||
491 | "bne 1f\n" | ||
492 | "ld 5, -112(1)\n" | ||
493 | "cmpwi 5, 0x5050\n" | ||
494 | "bne 1f\n" | ||
495 | "ld 6, -120(1)\n" | ||
496 | "cmpwi 6, 0x6060\n" | ||
497 | "bne 1f\n" | ||
498 | "ld 7, -128(1)\n" | ||
499 | "cmpwi 7, 0x7070\n" | ||
500 | "bne 1f\n" | ||
501 | "ld 8, -136(1)\n" | ||
502 | "cmpwi 8, 0x0808\n" | ||
503 | "bne 1f\n" | ||
504 | "ld 9, -144(1)\n" | ||
505 | "cmpwi 9, 0x0909\n" | ||
506 | "bne 1f\n" | ||
507 | "ld 10, -152(1)\n" | ||
508 | "cmpwi 10, 0x1010\n" | ||
509 | "bne 1f\n" | ||
510 | "ld 11, -160(1)\n" | ||
511 | "cmpwi 11, 0x1111\n" | ||
512 | "bne 1f\n" | ||
513 | "ld 14, -168(1)\n" | ||
514 | "cmpwi 14, 0x1414\n" | ||
515 | "bne 1f\n" | ||
516 | "ld 15, -176(1)\n" | ||
517 | "cmpwi 15, 0x1515\n" | ||
518 | "bne 1f\n" | ||
519 | "ld 16, -184(1)\n" | ||
520 | "cmpwi 16, 0x1616\n" | ||
521 | "bne 1f\n" | ||
522 | "ld 17, -192(1)\n" | ||
523 | "cmpwi 17, 0x1717\n" | ||
524 | "bne 1f\n" | ||
525 | "ld 18, -200(1)\n" | ||
526 | "cmpwi 18, 0x1818\n" | ||
527 | "bne 1f\n" | ||
528 | "ld 19, -208(1)\n" | ||
529 | "cmpwi 19, 0x1919\n" | ||
530 | "bne 1f\n" | ||
531 | "ld 20, -216(1)\n" | ||
532 | "cmpwi 20, 0x2020\n" | ||
533 | "bne 1f\n" | ||
534 | "ld 21, -224(1)\n" | ||
535 | "cmpwi 21, 0x2121\n" | ||
536 | "bne 1f\n" | ||
537 | "ld 22, -232(1)\n" | ||
538 | "cmpwi 22, 0x2222\n" | ||
539 | "bne 1f\n" | ||
540 | "ld 23, -240(1)\n" | ||
541 | "cmpwi 23, 0x2323\n" | ||
542 | "bne 1f\n" | ||
543 | "ld 24, -248(1)\n" | ||
544 | "cmpwi 24, 0x2424\n" | ||
545 | "bne 1f\n" | ||
546 | "ld 25, -256(1)\n" | ||
547 | "cmpwi 25, 0x2525\n" | ||
548 | "bne 1f\n" | ||
549 | "ld 26, -264(1)\n" | ||
550 | "cmpwi 26, 0x2626\n" | ||
551 | "bne 1f\n" | ||
552 | "ld 27, -272(1)\n" | ||
553 | "cmpwi 27, 0x2727\n" | ||
554 | "bne 1f\n" | ||
555 | "ld 28, -280(1)\n" | ||
556 | "cmpwi 28, 0x2828\n" | ||
557 | "bne 1f\n" | ||
558 | "ld 29, -288(1)\n" | ||
559 | "cmpwi 29, 0x2929\n" | ||
560 | "bne 1f\n" | ||
561 | |||
562 | /* Load 0 (success) to return */ | ||
563 | "li 0, 0\n" | ||
564 | |||
565 | "1: mr %0, 0\n" | ||
566 | |||
567 | : "=r" (rc) | ||
568 | : /* no inputs */ | ||
569 | : "3", "4", "5", "6", "7", "8", "9", "10", "11", "14", | ||
570 | "15", "16", "17", "18", "19", "20", "21", "22", "23", | ||
571 | "24", "25", "26", "27", "28", "29", "30", "31", | ||
572 | "memory" | ||
573 | ); | ||
574 | |||
575 | return rc; | ||
576 | } | ||
577 | |||
578 | int core_busy_loop_with_freeze(void) | ||
579 | { | ||
580 | int rc; | ||
581 | |||
582 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); | ||
583 | rc = core_busy_loop(); | ||
584 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); | ||
585 | |||
586 | return rc; | ||
587 | } | ||
588 | |||
589 | int ebb_child(union pipe read_pipe, union pipe write_pipe) | ||
590 | { | ||
591 | struct event event; | ||
592 | uint64_t val; | ||
593 | |||
594 | FAIL_IF(wait_for_parent(read_pipe)); | ||
595 | |||
596 | event_init_named(&event, 0x1001e, "cycles"); | ||
597 | event_leader_ebb_init(&event); | ||
598 | |||
599 | event.attr.exclude_kernel = 1; | ||
600 | event.attr.exclude_hv = 1; | ||
601 | event.attr.exclude_idle = 1; | ||
602 | |||
603 | FAIL_IF(event_open(&event)); | ||
604 | |||
605 | ebb_enable_pmc_counting(1); | ||
606 | setup_ebb_handler(standard_ebb_callee); | ||
607 | ebb_global_enable(); | ||
608 | |||
609 | FAIL_IF(event_enable(&event)); | ||
610 | |||
611 | if (event_read(&event)) { | ||
612 | /* | ||
613 | * Some tests expect to fail here, so don't report an error on | ||
614 | * this line, and return a distinguishable error code. Tell the | ||
615 | * parent an error happened. | ||
616 | */ | ||
617 | notify_parent_of_error(write_pipe); | ||
618 | return 2; | ||
619 | } | ||
620 | |||
621 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
622 | |||
623 | FAIL_IF(notify_parent(write_pipe)); | ||
624 | FAIL_IF(wait_for_parent(read_pipe)); | ||
625 | FAIL_IF(notify_parent(write_pipe)); | ||
626 | |||
627 | while (ebb_state.stats.ebb_count < 20) { | ||
628 | FAIL_IF(core_busy_loop()); | ||
629 | |||
630 | /* To try and hit SIGILL case */ | ||
631 | val = mfspr(SPRN_MMCRA); | ||
632 | val |= mfspr(SPRN_MMCR2); | ||
633 | val |= mfspr(SPRN_MMCR0); | ||
634 | } | ||
635 | |||
636 | ebb_global_disable(); | ||
637 | ebb_freeze_pmcs(); | ||
638 | |||
639 | count_pmc(1, sample_period); | ||
640 | |||
641 | dump_ebb_state(); | ||
642 | |||
643 | event_close(&event); | ||
644 | |||
645 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
646 | |||
647 | return 0; | ||
648 | } | ||
649 | |||
650 | static jmp_buf setjmp_env; | ||
651 | |||
652 | static void sigill_handler(int signal) | ||
653 | { | ||
654 | printf("Took sigill\n"); | ||
655 | longjmp(setjmp_env, 1); | ||
656 | } | ||
657 | |||
658 | static struct sigaction sigill_action = { | ||
659 | .sa_handler = sigill_handler, | ||
660 | }; | ||
661 | |||
662 | int catch_sigill(void (*func)(void)) | ||
663 | { | ||
664 | if (sigaction(SIGILL, &sigill_action, NULL)) { | ||
665 | perror("sigaction"); | ||
666 | return 1; | ||
667 | } | ||
668 | |||
669 | if (setjmp(setjmp_env) == 0) { | ||
670 | func(); | ||
671 | return 1; | ||
672 | } | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | void write_pmc1(void) | ||
678 | { | ||
679 | mtspr(SPRN_PMC1, 0); | ||
680 | } | ||
681 | |||
682 | void write_pmc(int pmc, u64 value) | ||
683 | { | ||
684 | switch (pmc) { | ||
685 | case 1: mtspr(SPRN_PMC1, value); break; | ||
686 | case 2: mtspr(SPRN_PMC2, value); break; | ||
687 | case 3: mtspr(SPRN_PMC3, value); break; | ||
688 | case 4: mtspr(SPRN_PMC4, value); break; | ||
689 | case 5: mtspr(SPRN_PMC5, value); break; | ||
690 | case 6: mtspr(SPRN_PMC6, value); break; | ||
691 | } | ||
692 | } | ||
693 | |||
694 | u64 read_pmc(int pmc) | ||
695 | { | ||
696 | switch (pmc) { | ||
697 | case 1: return mfspr(SPRN_PMC1); | ||
698 | case 2: return mfspr(SPRN_PMC2); | ||
699 | case 3: return mfspr(SPRN_PMC3); | ||
700 | case 4: return mfspr(SPRN_PMC4); | ||
701 | case 5: return mfspr(SPRN_PMC5); | ||
702 | case 6: return mfspr(SPRN_PMC6); | ||
703 | } | ||
704 | |||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | static void term_handler(int signal) | ||
709 | { | ||
710 | dump_summary_ebb_state(); | ||
711 | dump_ebb_hw_state(); | ||
712 | abort(); | ||
713 | } | ||
714 | |||
715 | struct sigaction term_action = { | ||
716 | .sa_handler = term_handler, | ||
717 | }; | ||
718 | |||
719 | static void __attribute__((constructor)) ebb_init(void) | ||
720 | { | ||
721 | clear_ebb_stats(); | ||
722 | |||
723 | if (sigaction(SIGTERM, &term_action, NULL)) | ||
724 | perror("sigaction"); | ||
725 | |||
726 | ebb_state.trace = trace_buffer_allocate(1 * 1024 * 1024); | ||
727 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
new file mode 100644
index 000000000000..e62bde05bf78
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
@@ -0,0 +1,78 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #ifndef _SELFTESTS_POWERPC_PMU_EBB_EBB_H | ||
7 | #define _SELFTESTS_POWERPC_PMU_EBB_EBB_H | ||
8 | |||
9 | #include "../event.h" | ||
10 | #include "../lib.h" | ||
11 | #include "trace.h" | ||
12 | #include "reg.h" | ||
13 | |||
14 | #define PMC_INDEX(pmc) ((pmc)-1) | ||
15 | |||
16 | #define NUM_PMC_VALUES 128 | ||
17 | |||
18 | struct ebb_state | ||
19 | { | ||
20 | struct { | ||
21 | u64 pmc_count[6]; | ||
22 | volatile int ebb_count; | ||
23 | int spurious; | ||
24 | int negative; | ||
25 | int no_overflow; | ||
26 | } stats; | ||
27 | |||
28 | bool pmc_enable[6]; | ||
29 | struct trace_buffer *trace; | ||
30 | }; | ||
31 | |||
32 | extern struct ebb_state ebb_state; | ||
33 | |||
34 | #define COUNTER_OVERFLOW 0x80000000ull | ||
35 | |||
36 | static inline uint32_t pmc_sample_period(uint32_t value) | ||
37 | { | ||
38 | return COUNTER_OVERFLOW - value; | ||
39 | } | ||
40 | |||
41 | static inline void ebb_enable_pmc_counting(int pmc) | ||
42 | { | ||
43 | ebb_state.pmc_enable[PMC_INDEX(pmc)] = true; | ||
44 | } | ||
45 | |||
46 | bool ebb_check_count(int pmc, u64 sample_period, int fudge); | ||
47 | void event_leader_ebb_init(struct event *e); | ||
48 | void event_ebb_init(struct event *e); | ||
49 | void event_bhrb_init(struct event *e, unsigned ifm); | ||
50 | void setup_ebb_handler(void (*callee)(void)); | ||
51 | void standard_ebb_callee(void); | ||
52 | int ebb_event_enable(struct event *e); | ||
53 | void ebb_global_enable(void); | ||
54 | void ebb_global_disable(void); | ||
55 | void ebb_freeze_pmcs(void); | ||
56 | void ebb_unfreeze_pmcs(void); | ||
57 | void event_ebb_init(struct event *e); | ||
58 | void event_leader_ebb_init(struct event *e); | ||
59 | int count_pmc(int pmc, uint32_t sample_period); | ||
60 | void dump_ebb_state(void); | ||
61 | void dump_summary_ebb_state(void); | ||
62 | void dump_ebb_hw_state(void); | ||
63 | void clear_ebb_stats(void); | ||
64 | void write_pmc(int pmc, u64 value); | ||
65 | u64 read_pmc(int pmc); | ||
66 | void reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask); | ||
67 | void reset_ebb(void); | ||
68 | int ebb_check_mmcr0(void); | ||
69 | |||
70 | extern u64 sample_period; | ||
71 | |||
72 | int core_busy_loop(void); | ||
73 | int core_busy_loop_with_freeze(void); | ||
74 | int ebb_child(union pipe read_pipe, union pipe write_pipe); | ||
75 | int catch_sigill(void (*func)(void)); | ||
76 | void write_pmc1(void); | ||
77 | |||
78 | #endif /* _SELFTESTS_POWERPC_PMU_EBB_EBB_H */ | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S b/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
new file mode 100644
index 000000000000..14274ea206e5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
@@ -0,0 +1,365 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <ppc-asm.h> | ||
7 | #include "reg.h" | ||
8 | |||
9 | |||
10 | /* ppc-asm.h defines most of the reg aliases, but not r1/r2. */ | ||
11 | #define r1 1 | ||
12 | #define r2 2 | ||
13 | |||
14 | #define RFEBB .long 0x4c000924 | ||
15 | |||
16 | /* Stack layout: | ||
17 | * | ||
18 | * ^ | ||
19 | * User stack | | ||
20 | * Back chain ------+ <- r1 <-------+ | ||
21 | * ... | | ||
22 | * Red zone / ABI Gap | | ||
23 | * ... | | ||
24 | * vr63 <+ | | ||
25 | * vr0 | | | ||
26 | * VSCR | | | ||
27 | * FSCR | | | ||
28 | * r31 | Save area | | ||
29 | * r0 | | | ||
30 | * XER | | | ||
31 | * CTR | | | ||
32 | * LR | | | ||
33 | * CCR <+ | | ||
34 | * ... <+ | | ||
35 | * LR | Caller frame | | ||
36 | * CCR | | | ||
37 | * Back chain <+ <- updated r1 --------+ | ||
38 | * | ||
39 | */ | ||
40 | |||
41 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
42 | #define ABIGAP 512 | ||
43 | #else | ||
44 | #define ABIGAP 288 | ||
45 | #endif | ||
46 | |||
47 | #define NR_GPR 32 | ||
48 | #define NR_SPR 6 | ||
49 | #define NR_VSR 64 | ||
50 | |||
51 | #define SAVE_AREA ((NR_GPR + NR_SPR) * 8 + (NR_VSR * 16)) | ||
52 | #define CALLER_FRAME 112 | ||
53 | |||
54 | #define STACK_FRAME (ABIGAP + SAVE_AREA + CALLER_FRAME) | ||
55 | |||
56 | #define CCR_SAVE (CALLER_FRAME) | ||
57 | #define LR_SAVE (CCR_SAVE + 8) | ||
58 | #define CTR_SAVE (LR_SAVE + 8) | ||
59 | #define XER_SAVE (CTR_SAVE + 8) | ||
60 | #define GPR_SAVE(n) (XER_SAVE + 8 + (8 * n)) | ||
61 | #define FSCR_SAVE (GPR_SAVE(31) + 8) | ||
62 | #define VSCR_SAVE (FSCR_SAVE + 8) | ||
63 | #define VSR_SAVE(n) (VSCR_SAVE + 8 + (16 * n)) | ||
64 | |||
65 | #define SAVE_GPR(n) std n,GPR_SAVE(n)(r1) | ||
66 | #define REST_GPR(n) ld n,GPR_SAVE(n)(r1) | ||
67 | #define TRASH_GPR(n) lis n,0xaaaa | ||
68 | |||
69 | #define SAVE_VSR(n, b) li b, VSR_SAVE(n); stxvd2x n,b,r1 | ||
70 | #define LOAD_VSR(n, b) li b, VSR_SAVE(n); lxvd2x n,b,r1 | ||
71 | |||
72 | #define LOAD_REG_IMMEDIATE(reg,expr) \ | ||
73 | lis reg,(expr)@highest; \ | ||
74 | ori reg,reg,(expr)@higher; \ | ||
75 | rldicr reg,reg,32,31; \ | ||
76 | oris reg,reg,(expr)@h; \ | ||
77 | ori reg,reg,(expr)@l; | ||
78 | |||
79 | |||
80 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
81 | #define ENTRY_POINT(name) \ | ||
82 | .type FUNC_NAME(name),@function; \ | ||
83 | .globl FUNC_NAME(name); \ | ||
84 | FUNC_NAME(name): | ||
85 | |||
86 | #define RESTORE_TOC(name) \ | ||
87 | /* Restore our TOC pointer using our entry point */ \ | ||
88 | LOAD_REG_IMMEDIATE(r12, name) \ | ||
89 | 0: addis r2,r12,(.TOC.-0b)@ha; \ | ||
90 | addi r2,r2,(.TOC.-0b)@l; | ||
91 | |||
92 | #else | ||
93 | #define ENTRY_POINT(name) FUNC_START(name) | ||
94 | #define RESTORE_TOC(name) \ | ||
95 | /* Restore our TOC pointer via our opd entry */ \ | ||
96 | LOAD_REG_IMMEDIATE(r2, name) \ | ||
97 | ld r2,8(r2); | ||
98 | #endif | ||
99 | |||
100 | .text | ||
101 | |||
102 | ENTRY_POINT(ebb_handler) | ||
103 | stdu r1,-STACK_FRAME(r1) | ||
104 | SAVE_GPR(0) | ||
105 | mflr r0 | ||
106 | std r0,LR_SAVE(r1) | ||
107 | mfcr r0 | ||
108 | std r0,CCR_SAVE(r1) | ||
109 | mfctr r0 | ||
110 | std r0,CTR_SAVE(r1) | ||
111 | mfxer r0 | ||
112 | std r0,XER_SAVE(r1) | ||
113 | SAVE_GPR(2) | ||
114 | SAVE_GPR(3) | ||
115 | SAVE_GPR(4) | ||
116 | SAVE_GPR(5) | ||
117 | SAVE_GPR(6) | ||
118 | SAVE_GPR(7) | ||
119 | SAVE_GPR(8) | ||
120 | SAVE_GPR(9) | ||
121 | SAVE_GPR(10) | ||
122 | SAVE_GPR(11) | ||
123 | SAVE_GPR(12) | ||
124 | SAVE_GPR(13) | ||
125 | SAVE_GPR(14) | ||
126 | SAVE_GPR(15) | ||
127 | SAVE_GPR(16) | ||
128 | SAVE_GPR(17) | ||
129 | SAVE_GPR(18) | ||
130 | SAVE_GPR(19) | ||
131 | SAVE_GPR(20) | ||
132 | SAVE_GPR(21) | ||
133 | SAVE_GPR(22) | ||
134 | SAVE_GPR(23) | ||
135 | SAVE_GPR(24) | ||
136 | SAVE_GPR(25) | ||
137 | SAVE_GPR(26) | ||
138 | SAVE_GPR(27) | ||
139 | SAVE_GPR(28) | ||
140 | SAVE_GPR(29) | ||
141 | SAVE_GPR(30) | ||
142 | SAVE_GPR(31) | ||
143 | SAVE_VSR(0, r3) | ||
144 | mffs f0 | ||
145 | stfd f0, FSCR_SAVE(r1) | ||
146 | mfvscr f0 | ||
147 | stfd f0, VSCR_SAVE(r1) | ||
148 | SAVE_VSR(1, r3) | ||
149 | SAVE_VSR(2, r3) | ||
150 | SAVE_VSR(3, r3) | ||
151 | SAVE_VSR(4, r3) | ||
152 | SAVE_VSR(5, r3) | ||
153 | SAVE_VSR(6, r3) | ||
154 | SAVE_VSR(7, r3) | ||
155 | SAVE_VSR(8, r3) | ||
156 | SAVE_VSR(9, r3) | ||
157 | SAVE_VSR(10, r3) | ||
158 | SAVE_VSR(11, r3) | ||
159 | SAVE_VSR(12, r3) | ||
160 | SAVE_VSR(13, r3) | ||
161 | SAVE_VSR(14, r3) | ||
162 | SAVE_VSR(15, r3) | ||
163 | SAVE_VSR(16, r3) | ||
164 | SAVE_VSR(17, r3) | ||
165 | SAVE_VSR(18, r3) | ||
166 | SAVE_VSR(19, r3) | ||
167 | SAVE_VSR(20, r3) | ||
168 | SAVE_VSR(21, r3) | ||
169 | SAVE_VSR(22, r3) | ||
170 | SAVE_VSR(23, r3) | ||
171 | SAVE_VSR(24, r3) | ||
172 | SAVE_VSR(25, r3) | ||
173 | SAVE_VSR(26, r3) | ||
174 | SAVE_VSR(27, r3) | ||
175 | SAVE_VSR(28, r3) | ||
176 | SAVE_VSR(29, r3) | ||
177 | SAVE_VSR(30, r3) | ||
178 | SAVE_VSR(31, r3) | ||
179 | SAVE_VSR(32, r3) | ||
180 | SAVE_VSR(33, r3) | ||
181 | SAVE_VSR(34, r3) | ||
182 | SAVE_VSR(35, r3) | ||
183 | SAVE_VSR(36, r3) | ||
184 | SAVE_VSR(37, r3) | ||
185 | SAVE_VSR(38, r3) | ||
186 | SAVE_VSR(39, r3) | ||
187 | SAVE_VSR(40, r3) | ||
188 | SAVE_VSR(41, r3) | ||
189 | SAVE_VSR(42, r3) | ||
190 | SAVE_VSR(43, r3) | ||
191 | SAVE_VSR(44, r3) | ||
192 | SAVE_VSR(45, r3) | ||
193 | SAVE_VSR(46, r3) | ||
194 | SAVE_VSR(47, r3) | ||
195 | SAVE_VSR(48, r3) | ||
196 | SAVE_VSR(49, r3) | ||
197 | SAVE_VSR(50, r3) | ||
198 | SAVE_VSR(51, r3) | ||
199 | SAVE_VSR(52, r3) | ||
200 | SAVE_VSR(53, r3) | ||
201 | SAVE_VSR(54, r3) | ||
202 | SAVE_VSR(55, r3) | ||
203 | SAVE_VSR(56, r3) | ||
204 | SAVE_VSR(57, r3) | ||
205 | SAVE_VSR(58, r3) | ||
206 | SAVE_VSR(59, r3) | ||
207 | SAVE_VSR(60, r3) | ||
208 | SAVE_VSR(61, r3) | ||
209 | SAVE_VSR(62, r3) | ||
210 | SAVE_VSR(63, r3) | ||
211 | |||
212 | TRASH_GPR(2) | ||
213 | TRASH_GPR(3) | ||
214 | TRASH_GPR(4) | ||
215 | TRASH_GPR(5) | ||
216 | TRASH_GPR(6) | ||
217 | TRASH_GPR(7) | ||
218 | TRASH_GPR(8) | ||
219 | TRASH_GPR(9) | ||
220 | TRASH_GPR(10) | ||
221 | TRASH_GPR(11) | ||
222 | TRASH_GPR(12) | ||
223 | TRASH_GPR(14) | ||
224 | TRASH_GPR(15) | ||
225 | TRASH_GPR(16) | ||
226 | TRASH_GPR(17) | ||
227 | TRASH_GPR(18) | ||
228 | TRASH_GPR(19) | ||
229 | TRASH_GPR(20) | ||
230 | TRASH_GPR(21) | ||
231 | TRASH_GPR(22) | ||
232 | TRASH_GPR(23) | ||
233 | TRASH_GPR(24) | ||
234 | TRASH_GPR(25) | ||
235 | TRASH_GPR(26) | ||
236 | TRASH_GPR(27) | ||
237 | TRASH_GPR(28) | ||
238 | TRASH_GPR(29) | ||
239 | TRASH_GPR(30) | ||
240 | TRASH_GPR(31) | ||
241 | |||
242 | RESTORE_TOC(ebb_handler) | ||
243 | |||
244 | /* | ||
245 | * r13 is our TLS pointer. We leave whatever value was in there when the | ||
246 | * EBB fired. That seems to be OK because once set the TLS pointer is not | ||
247 | * changed - but presumably that could change in future. | ||
248 | */ | ||
249 | |||
250 | bl ebb_hook | ||
251 | nop | ||
252 | |||
253 | /* r2 may be changed here but we don't care */ | ||
254 | |||
255 | lfd f0, FSCR_SAVE(r1) | ||
256 | mtfsf 0xff,f0 | ||
257 | lfd f0, VSCR_SAVE(r1) | ||
258 | mtvscr f0 | ||
259 | LOAD_VSR(0, r3) | ||
260 | LOAD_VSR(1, r3) | ||
261 | LOAD_VSR(2, r3) | ||
262 | LOAD_VSR(3, r3) | ||
263 | LOAD_VSR(4, r3) | ||
264 | LOAD_VSR(5, r3) | ||
265 | LOAD_VSR(6, r3) | ||
266 | LOAD_VSR(7, r3) | ||
267 | LOAD_VSR(8, r3) | ||
268 | LOAD_VSR(9, r3) | ||
269 | LOAD_VSR(10, r3) | ||
270 | LOAD_VSR(11, r3) | ||
271 | LOAD_VSR(12, r3) | ||
272 | LOAD_VSR(13, r3) | ||
273 | LOAD_VSR(14, r3) | ||
274 | LOAD_VSR(15, r3) | ||
275 | LOAD_VSR(16, r3) | ||
276 | LOAD_VSR(17, r3) | ||
277 | LOAD_VSR(18, r3) | ||
278 | LOAD_VSR(19, r3) | ||
279 | LOAD_VSR(20, r3) | ||
280 | LOAD_VSR(21, r3) | ||
281 | LOAD_VSR(22, r3) | ||
282 | LOAD_VSR(23, r3) | ||
283 | LOAD_VSR(24, r3) | ||
284 | LOAD_VSR(25, r3) | ||
285 | LOAD_VSR(26, r3) | ||
286 | LOAD_VSR(27, r3) | ||
287 | LOAD_VSR(28, r3) | ||
288 | LOAD_VSR(29, r3) | ||
289 | LOAD_VSR(30, r3) | ||
290 | LOAD_VSR(31, r3) | ||
291 | LOAD_VSR(32, r3) | ||
292 | LOAD_VSR(33, r3) | ||
293 | LOAD_VSR(34, r3) | ||
294 | LOAD_VSR(35, r3) | ||
295 | LOAD_VSR(36, r3) | ||
296 | LOAD_VSR(37, r3) | ||
297 | LOAD_VSR(38, r3) | ||
298 | LOAD_VSR(39, r3) | ||
299 | LOAD_VSR(40, r3) | ||
300 | LOAD_VSR(41, r3) | ||
301 | LOAD_VSR(42, r3) | ||
302 | LOAD_VSR(43, r3) | ||
303 | LOAD_VSR(44, r3) | ||
304 | LOAD_VSR(45, r3) | ||
305 | LOAD_VSR(46, r3) | ||
306 | LOAD_VSR(47, r3) | ||
307 | LOAD_VSR(48, r3) | ||
308 | LOAD_VSR(49, r3) | ||
309 | LOAD_VSR(50, r3) | ||
310 | LOAD_VSR(51, r3) | ||
311 | LOAD_VSR(52, r3) | ||
312 | LOAD_VSR(53, r3) | ||
313 | LOAD_VSR(54, r3) | ||
314 | LOAD_VSR(55, r3) | ||
315 | LOAD_VSR(56, r3) | ||
316 | LOAD_VSR(57, r3) | ||
317 | LOAD_VSR(58, r3) | ||
318 | LOAD_VSR(59, r3) | ||
319 | LOAD_VSR(60, r3) | ||
320 | LOAD_VSR(61, r3) | ||
321 | LOAD_VSR(62, r3) | ||
322 | LOAD_VSR(63, r3) | ||
323 | |||
324 | ld r0,XER_SAVE(r1) | ||
325 | mtxer r0 | ||
326 | ld r0,CTR_SAVE(r1) | ||
327 | mtctr r0 | ||
328 | ld r0,LR_SAVE(r1) | ||
329 | mtlr r0 | ||
330 | ld r0,CCR_SAVE(r1) | ||
331 | mtcr r0 | ||
332 | REST_GPR(0) | ||
333 | REST_GPR(2) | ||
334 | REST_GPR(3) | ||
335 | REST_GPR(4) | ||
336 | REST_GPR(5) | ||
337 | REST_GPR(6) | ||
338 | REST_GPR(7) | ||
339 | REST_GPR(8) | ||
340 | REST_GPR(9) | ||
341 | REST_GPR(10) | ||
342 | REST_GPR(11) | ||
343 | REST_GPR(12) | ||
344 | REST_GPR(13) | ||
345 | REST_GPR(14) | ||
346 | REST_GPR(15) | ||
347 | REST_GPR(16) | ||
348 | REST_GPR(17) | ||
349 | REST_GPR(18) | ||
350 | REST_GPR(19) | ||
351 | REST_GPR(20) | ||
352 | REST_GPR(21) | ||
353 | REST_GPR(22) | ||
354 | REST_GPR(23) | ||
355 | REST_GPR(24) | ||
356 | REST_GPR(25) | ||
357 | REST_GPR(26) | ||
358 | REST_GPR(27) | ||
359 | REST_GPR(28) | ||
360 | REST_GPR(29) | ||
361 | REST_GPR(30) | ||
362 | REST_GPR(31) | ||
363 | addi r1,r1,STACK_FRAME | ||
364 | RFEBB | ||
365 | FUNC_END(ebb_handler) | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c
new file mode 100644
index 000000000000..c45f948148e1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c
@@ -0,0 +1,86 @@
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests we can setup an EBB on our child. Nothing interesting happens, because | ||
19 | * even though the event is enabled and running the child hasn't enabled the | ||
20 | * actual delivery of the EBBs. | ||
21 | */ | ||
22 | |||
23 | static int victim_child(union pipe read_pipe, union pipe write_pipe) | ||
24 | { | ||
25 | int i; | ||
26 | |||
27 | FAIL_IF(wait_for_parent(read_pipe)); | ||
28 | FAIL_IF(notify_parent(write_pipe)); | ||
29 | |||
30 | /* Parent creates EBB event */ | ||
31 | |||
32 | FAIL_IF(wait_for_parent(read_pipe)); | ||
33 | FAIL_IF(notify_parent(write_pipe)); | ||
34 | |||
35 | /* Check the EBB is enabled by writing PMC1 */ | ||
36 | write_pmc1(); | ||
37 | |||
38 | /* EBB event is enabled here */ | ||
39 | for (i = 0; i < 1000000; i++) ; | ||
40 | |||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | int ebb_on_child(void) | ||
45 | { | ||
46 | union pipe read_pipe, write_pipe; | ||
47 | struct event event; | ||
48 | pid_t pid; | ||
49 | |||
50 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
51 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
52 | |||
53 | pid = fork(); | ||
54 | if (pid == 0) { | ||
55 | /* NB order of pipes looks reversed */ | ||
56 | exit(victim_child(write_pipe, read_pipe)); | ||
57 | } | ||
58 | |||
59 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
60 | |||
61 | /* Child is running now */ | ||
62 | |||
63 | event_init_named(&event, 0x1001e, "cycles"); | ||
64 | event_leader_ebb_init(&event); | ||
65 | |||
66 | event.attr.exclude_kernel = 1; | ||
67 | event.attr.exclude_hv = 1; | ||
68 | event.attr.exclude_idle = 1; | ||
69 | |||
70 | FAIL_IF(event_open_with_pid(&event, pid)); | ||
71 | FAIL_IF(ebb_event_enable(&event)); | ||
72 | |||
73 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
74 | |||
75 | /* Child should just exit happily */ | ||
76 | FAIL_IF(wait_for_child(pid)); | ||
77 | |||
78 | event_close(&event); | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | int main(void) | ||
84 | { | ||
85 | return test_harness(ebb_on_child, "ebb_on_child"); | ||
86 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c new file mode 100644 index 000000000000..11acf1d55f8d --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests that we can set up an EBB on our child. The child expects this and | ||
19 | * enables EBBs, which are then delivered to the child, even though the event | ||
20 | * is created by the parent. | ||
21 | */ | ||
22 | |||
23 | static int victim_child(union pipe read_pipe, union pipe write_pipe) | ||
24 | { | ||
25 | FAIL_IF(wait_for_parent(read_pipe)); | ||
26 | |||
27 | /* Setup our EBB handler, before the EBB event is created */ | ||
28 | ebb_enable_pmc_counting(1); | ||
29 | setup_ebb_handler(standard_ebb_callee); | ||
30 | ebb_global_enable(); | ||
31 | |||
32 | FAIL_IF(notify_parent(write_pipe)); | ||
33 | |||
34 | while (ebb_state.stats.ebb_count < 20) { | ||
35 | FAIL_IF(core_busy_loop()); | ||
36 | } | ||
37 | |||
38 | ebb_global_disable(); | ||
39 | ebb_freeze_pmcs(); | ||
40 | |||
41 | count_pmc(1, sample_period); | ||
42 | |||
43 | dump_ebb_state(); | ||
44 | |||
45 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | /* Tests we can set up an EBB on our child - if it's expecting it */ | ||
51 | int ebb_on_willing_child(void) | ||
52 | { | ||
53 | union pipe read_pipe, write_pipe; | ||
54 | struct event event; | ||
55 | pid_t pid; | ||
56 | |||
57 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
58 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
59 | |||
60 | pid = fork(); | ||
61 | if (pid == 0) { | ||
62 | /* NB order of pipes looks reversed */ | ||
63 | exit(victim_child(write_pipe, read_pipe)); | ||
64 | } | ||
65 | |||
66 | /* Signal the child to setup its EBB handler */ | ||
67 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
68 | |||
69 | /* Child is running now */ | ||
70 | |||
71 | event_init_named(&event, 0x1001e, "cycles"); | ||
72 | event_leader_ebb_init(&event); | ||
73 | |||
74 | event.attr.exclude_kernel = 1; | ||
75 | event.attr.exclude_hv = 1; | ||
76 | event.attr.exclude_idle = 1; | ||
77 | |||
78 | FAIL_IF(event_open_with_pid(&event, pid)); | ||
79 | FAIL_IF(ebb_event_enable(&event)); | ||
80 | |||
81 | /* Child should now take EBBs and then exit */ | ||
82 | FAIL_IF(wait_for_child(pid)); | ||
83 | |||
84 | event_close(&event); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | int main(void) | ||
90 | { | ||
91 | return test_harness(ebb_on_willing_child, "ebb_on_willing_child"); | ||
92 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c new file mode 100644 index 000000000000..be4dd5a4e98e --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests an EBB vs a cpu event - in that order. The EBB should force the cpu | ||
19 | * event off the PMU. | ||
20 | */ | ||
21 | |||
22 | static int setup_cpu_event(struct event *event, int cpu) | ||
23 | { | ||
24 | event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); | ||
25 | |||
26 | event->attr.exclude_kernel = 1; | ||
27 | event->attr.exclude_hv = 1; | ||
28 | event->attr.exclude_idle = 1; | ||
29 | |||
30 | SKIP_IF(require_paranoia_below(1)); | ||
31 | FAIL_IF(event_open_with_cpu(event, cpu)); | ||
32 | FAIL_IF(event_enable(event)); | ||
33 | |||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | int ebb_vs_cpu_event(void) | ||
38 | { | ||
39 | union pipe read_pipe, write_pipe; | ||
40 | struct event event; | ||
41 | int cpu, rc; | ||
42 | pid_t pid; | ||
43 | |||
44 | cpu = pick_online_cpu(); | ||
45 | FAIL_IF(cpu < 0); | ||
46 | FAIL_IF(bind_to_cpu(cpu)); | ||
47 | |||
48 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
49 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
50 | |||
51 | pid = fork(); | ||
52 | if (pid == 0) { | ||
53 | /* NB order of pipes looks reversed */ | ||
54 | exit(ebb_child(write_pipe, read_pipe)); | ||
55 | } | ||
56 | |||
57 | /* Signal the child to install its EBB event and wait */ | ||
58 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
59 | |||
60 | /* Now try to install our CPU event */ | ||
61 | rc = setup_cpu_event(&event, cpu); | ||
62 | if (rc) { | ||
63 | kill_child_and_wait(pid); | ||
64 | return rc; | ||
65 | } | ||
66 | |||
67 | /* Signal the child to run */ | ||
68 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
69 | |||
70 | /* .. and wait for it to complete */ | ||
71 | FAIL_IF(wait_for_child(pid)); | ||
72 | FAIL_IF(event_disable(&event)); | ||
73 | FAIL_IF(event_read(&event)); | ||
74 | |||
75 | event_report(&event); | ||
76 | |||
77 | /* The cpu event may have run, but we don't expect 100% */ | ||
78 | FAIL_IF(event.result.enabled >= event.result.running); | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | int main(void) | ||
84 | { | ||
85 | return test_harness(ebb_vs_cpu_event, "ebb_vs_cpu_event"); | ||
86 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c b/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c new file mode 100644 index 000000000000..7e78153f08eb --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | |||
9 | #include "ebb.h" | ||
10 | |||
11 | |||
12 | /* | ||
13 | * Test various attributes of the EBB event are enforced. | ||
14 | */ | ||
15 | int event_attributes(void) | ||
16 | { | ||
17 | struct event event, leader; | ||
18 | |||
19 | event_init(&event, 0x1001e); | ||
20 | event_leader_ebb_init(&event); | ||
21 | /* Expected to succeed */ | ||
22 | FAIL_IF(event_open(&event)); | ||
23 | event_close(&event); | ||
24 | |||
25 | |||
26 | event_init(&event, 0x001e); /* CYCLES - no PMC specified */ | ||
27 | event_leader_ebb_init(&event); | ||
28 | /* Expected to fail, no PMC specified */ | ||
29 | FAIL_IF(event_open(&event) == 0); | ||
30 | |||
31 | |||
32 | event_init(&event, 0x2001e); | ||
33 | event_leader_ebb_init(&event); | ||
34 | event.attr.exclusive = 0; | ||
35 | /* Expected to fail, not exclusive */ | ||
36 | FAIL_IF(event_open(&event) == 0); | ||
37 | |||
38 | |||
39 | event_init(&event, 0x3001e); | ||
40 | event_leader_ebb_init(&event); | ||
41 | event.attr.freq = 1; | ||
42 | /* Expected to fail, sets freq */ | ||
43 | FAIL_IF(event_open(&event) == 0); | ||
44 | |||
45 | |||
46 | event_init(&event, 0x4001e); | ||
47 | event_leader_ebb_init(&event); | ||
48 | event.attr.sample_period = 1; | ||
49 | /* Expected to fail, sets sample_period */ | ||
50 | FAIL_IF(event_open(&event) == 0); | ||
51 | |||
52 | |||
53 | event_init(&event, 0x1001e); | ||
54 | event_leader_ebb_init(&event); | ||
55 | event.attr.enable_on_exec = 1; | ||
56 | /* Expected to fail, sets enable_on_exec */ | ||
57 | FAIL_IF(event_open(&event) == 0); | ||
58 | |||
59 | |||
60 | event_init(&event, 0x1001e); | ||
61 | event_leader_ebb_init(&event); | ||
62 | event.attr.inherit = 1; | ||
63 | /* Expected to fail, sets inherit */ | ||
64 | FAIL_IF(event_open(&event) == 0); | ||
65 | |||
66 | |||
67 | event_init(&leader, 0x1001e); | ||
68 | event_leader_ebb_init(&leader); | ||
69 | FAIL_IF(event_open(&leader)); | ||
70 | |||
71 | event_init(&event, 0x20002); | ||
72 | event_ebb_init(&event); | ||
73 | |||
74 | /* Expected to succeed */ | ||
75 | FAIL_IF(event_open_with_group(&event, leader.fd)); | ||
76 | event_close(&leader); | ||
77 | event_close(&event); | ||
78 | |||
79 | |||
80 | event_init(&leader, 0x1001e); | ||
81 | event_leader_ebb_init(&leader); | ||
82 | FAIL_IF(event_open(&leader)); | ||
83 | |||
84 | event_init(&event, 0x20002); | ||
85 | |||
86 | /* Expected to fail, event doesn't request EBB, leader does */ | ||
87 | FAIL_IF(event_open_with_group(&event, leader.fd) == 0); | ||
88 | event_close(&leader); | ||
89 | |||
90 | |||
91 | event_init(&leader, 0x1001e); | ||
92 | event_leader_ebb_init(&leader); | ||
93 | /* Clear the EBB flag */ | ||
94 | leader.attr.config &= ~(1ull << 63); | ||
95 | |||
96 | FAIL_IF(event_open(&leader)); | ||
97 | |||
98 | event_init(&event, 0x20002); | ||
99 | event_ebb_init(&event); | ||
100 | |||
101 | /* Expected to fail, leader doesn't request EBB */ | ||
102 | FAIL_IF(event_open_with_group(&event, leader.fd) == 0); | ||
103 | event_close(&leader); | ||
104 | |||
105 | |||
106 | event_init(&leader, 0x1001e); | ||
107 | event_leader_ebb_init(&leader); | ||
108 | leader.attr.exclusive = 0; | ||
109 | /* Expected to fail, leader isn't exclusive */ | ||
110 | FAIL_IF(event_open(&leader) == 0); | ||
111 | |||
112 | |||
113 | event_init(&leader, 0x1001e); | ||
114 | event_leader_ebb_init(&leader); | ||
115 | leader.attr.pinned = 0; | ||
116 | /* Expected to fail, leader isn't pinned */ | ||
117 | FAIL_IF(event_open(&leader) == 0); | ||
118 | |||
119 | event_init(&event, 0x1001e); | ||
120 | event_leader_ebb_init(&event); | ||
121 | /* Expected to fail, not a task event */ | ||
122 | SKIP_IF(require_paranoia_below(1)); | ||
123 | FAIL_IF(event_open_with_cpu(&event, 0) == 0); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | int main(void) | ||
129 | { | ||
130 | return test_harness(event_attributes, "event_attributes"); | ||
131 | } | ||
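The checks above lean on two helpers from ebb.c (not shown in this hunk) to mark events as requesting EBBs. From what the test enforces -- the leader must be pinned and exclusive, and the EBB request is bit 63 of attr.config, which the test clears with ~(1ull << 63) -- the helpers are presumably along these lines; a sketch, not the patch's actual implementation:

    /* Presumed shape of the ebb.c helpers, inferred from the checks above. */
    #define EBB_CONFIG_BIT (1ull << 63)           /* EBB request flag in the raw config */

    void event_ebb_init(struct event *e)
    {
            e->attr.config |= EBB_CONFIG_BIT;
    }

    void event_leader_ebb_init(struct event *e)
    {
            event_ebb_init(e);
            /* The kernel requires the group leader to be pinned and exclusive */
            e->attr.pinned = 1;
            e->attr.exclusive = 1;
    }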
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S new file mode 100644 index 000000000000..b866a0581d32 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <ppc-asm.h> | ||
7 | |||
8 | .text | ||
9 | |||
10 | FUNC_START(thirty_two_instruction_loop) | ||
11 | cmpwi r3,0 | ||
12 | beqlr | ||
13 | addi r4,r3,1 | ||
14 | addi r4,r4,1 | ||
15 | addi r4,r4,1 | ||
16 | addi r4,r4,1 | ||
17 | addi r4,r4,1 | ||
18 | addi r4,r4,1 | ||
19 | addi r4,r4,1 | ||
20 | addi r4,r4,1 | ||
21 | addi r4,r4,1 | ||
22 | addi r4,r4,1 | ||
23 | addi r4,r4,1 | ||
24 | addi r4,r4,1 | ||
25 | addi r4,r4,1 | ||
26 | addi r4,r4,1 | ||
27 | addi r4,r4,1 | ||
28 | addi r4,r4,1 | ||
29 | addi r4,r4,1 | ||
30 | addi r4,r4,1 | ||
31 | addi r4,r4,1 | ||
32 | addi r4,r4,1 | ||
33 | addi r4,r4,1 | ||
34 | addi r4,r4,1 | ||
35 | addi r4,r4,1 | ||
36 | addi r4,r4,1 | ||
37 | addi r4,r4,1 | ||
38 | addi r4,r4,1 | ||
39 | addi r4,r4,1 | ||
40 | addi r4,r4,1 # 28 addi's | ||
41 | subi r3,r3,1 | ||
42 | b FUNC_NAME(thirty_two_instruction_loop) | ||
43 | FUNC_END(thirty_two_instruction_loop) | ||
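One pass around the loop is exactly 32 instructions (cmpwi, beqlr, 28 addi, subi, b), and the final pass costs two more for the cmpwi plus the taken beqlr, so a call with argument N executes roughly 32 * N + 2 instructions. That is why instruction_count_test.c below drives it with the desired instruction count shifted right by five (run_roughly is just an illustrative wrapper name):

    /* How the loop is driven from C (instruction_count_test.c does exactly this). */
    extern void thirty_two_instruction_loop(uint64_t loops);

    static void run_roughly(uint64_t instructions)
    {
            /* 32 instructions per iteration, so divide the target count by 32 */
            thirty_two_instruction_loop(instructions >> 5);
    }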
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c new file mode 100644 index 000000000000..9e7af6e76622 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | #include <setjmp.h> | ||
14 | #include <signal.h> | ||
15 | |||
16 | #include "ebb.h" | ||
17 | |||
18 | |||
19 | /* | ||
20 | * Test that a fork clears the PMU state of the child, e.g. BESCR/EBBHR/EBBRR | ||
21 | * are cleared, and MMCR0_PMCC is reset, preventing the child from accessing | ||
22 | * the PMU. | ||
23 | */ | ||
24 | |||
25 | static struct event event; | ||
26 | |||
27 | static int child(void) | ||
28 | { | ||
29 | /* Even though we have EBE=0 we can still see the EBB regs */ | ||
30 | FAIL_IF(mfspr(SPRN_BESCR) != 0); | ||
31 | FAIL_IF(mfspr(SPRN_EBBHR) != 0); | ||
32 | FAIL_IF(mfspr(SPRN_EBBRR) != 0); | ||
33 | |||
34 | FAIL_IF(catch_sigill(write_pmc1)); | ||
35 | |||
36 | /* We can still read from the event, though it is on our parent */ | ||
37 | FAIL_IF(event_read(&event)); | ||
38 | |||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | /* Tests that fork clears EBB state */ | ||
43 | int fork_cleanup(void) | ||
44 | { | ||
45 | pid_t pid; | ||
46 | |||
47 | event_init_named(&event, 0x1001e, "cycles"); | ||
48 | event_leader_ebb_init(&event); | ||
49 | |||
50 | FAIL_IF(event_open(&event)); | ||
51 | |||
52 | ebb_enable_pmc_counting(1); | ||
53 | setup_ebb_handler(standard_ebb_callee); | ||
54 | ebb_global_enable(); | ||
55 | |||
56 | FAIL_IF(ebb_event_enable(&event)); | ||
57 | |||
58 | mtspr(SPRN_MMCR0, MMCR0_FC); | ||
59 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
60 | |||
61 | /* Don't need to actually take any EBBs */ | ||
62 | |||
63 | pid = fork(); | ||
64 | if (pid == 0) | ||
65 | exit(child()); | ||
66 | |||
67 | /* Child does the actual testing */ | ||
68 | FAIL_IF(wait_for_child(pid)); | ||
69 | |||
70 | /* After fork */ | ||
71 | event_close(&event); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | int main(void) | ||
77 | { | ||
78 | return test_harness(fork_cleanup, "fork_cleanup"); | ||
79 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c new file mode 100644 index 000000000000..f8190fa29592 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #define _GNU_SOURCE | ||
7 | |||
8 | #include <stdio.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <string.h> | ||
11 | #include <sys/prctl.h> | ||
12 | |||
13 | #include "ebb.h" | ||
14 | |||
15 | |||
16 | /* | ||
17 | * Run a calibrated instruction loop and count instructions executed using | ||
18 | * EBBs. Make sure the counts look right. | ||
19 | */ | ||
20 | |||
21 | extern void thirty_two_instruction_loop(uint64_t loops); | ||
22 | |||
23 | static bool counters_frozen = true; | ||
24 | |||
25 | static int do_count_loop(struct event *event, uint64_t instructions, | ||
26 | uint64_t overhead, bool report) | ||
27 | { | ||
28 | int64_t difference, expected; | ||
29 | double percentage; | ||
30 | |||
31 | clear_ebb_stats(); | ||
32 | |||
33 | counters_frozen = false; | ||
34 | mb(); | ||
35 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); | ||
36 | |||
37 | thirty_two_instruction_loop(instructions >> 5); | ||
38 | |||
39 | counters_frozen = true; | ||
40 | mb(); | ||
41 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); | ||
42 | |||
43 | count_pmc(4, sample_period); | ||
44 | |||
45 | event->result.value = ebb_state.stats.pmc_count[4-1]; | ||
46 | expected = instructions + overhead; | ||
47 | difference = event->result.value - expected; | ||
48 | percentage = (double)difference / event->result.value * 100; | ||
49 | |||
50 | if (report) { | ||
51 | printf("Looped for %lu instructions, overhead %lu\n", instructions, overhead); | ||
52 | printf("Expected %lu\n", expected); | ||
53 | printf("Actual %llu\n", event->result.value); | ||
54 | printf("Error %ld, %f%%\n", difference, percentage); | ||
55 | printf("Took %d EBBs\n", ebb_state.stats.ebb_count); | ||
56 | } | ||
57 | |||
58 | if (difference < 0) | ||
59 | difference = -difference; | ||
60 | |||
61 | /* Tolerate a difference of up to 0.0001 % */ | ||
62 | difference *= 10000 * 100; | ||
63 | if (difference / event->result.value) | ||
64 | return -1; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | /* Count how many instructions it takes to do a null loop */ | ||
70 | static uint64_t determine_overhead(struct event *event) | ||
71 | { | ||
72 | uint64_t current, overhead; | ||
73 | int i; | ||
74 | |||
75 | do_count_loop(event, 0, 0, false); | ||
76 | overhead = event->result.value; | ||
77 | |||
78 | for (i = 0; i < 100; i++) { | ||
79 | do_count_loop(event, 0, 0, false); | ||
80 | current = event->result.value; | ||
81 | if (current < overhead) { | ||
82 | printf("Replacing overhead %lu with %lu\n", overhead, current); | ||
83 | overhead = current; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | return overhead; | ||
88 | } | ||
89 | |||
90 | static void pmc4_ebb_callee(void) | ||
91 | { | ||
92 | uint64_t val; | ||
93 | |||
94 | val = mfspr(SPRN_BESCR); | ||
95 | if (!(val & BESCR_PMEO)) { | ||
96 | ebb_state.stats.spurious++; | ||
97 | goto out; | ||
98 | } | ||
99 | |||
100 | ebb_state.stats.ebb_count++; | ||
101 | count_pmc(4, sample_period); | ||
102 | out: | ||
103 | if (counters_frozen) | ||
104 | reset_ebb_with_clear_mask(MMCR0_PMAO); | ||
105 | else | ||
106 | reset_ebb(); | ||
107 | } | ||
108 | |||
109 | int instruction_count(void) | ||
110 | { | ||
111 | struct event event; | ||
112 | uint64_t overhead; | ||
113 | |||
114 | event_init_named(&event, 0x400FA, "PM_RUN_INST_CMPL"); | ||
115 | event_leader_ebb_init(&event); | ||
116 | event.attr.exclude_kernel = 1; | ||
117 | event.attr.exclude_hv = 1; | ||
118 | event.attr.exclude_idle = 1; | ||
119 | |||
120 | FAIL_IF(event_open(&event)); | ||
121 | FAIL_IF(ebb_event_enable(&event)); | ||
122 | |||
123 | sample_period = COUNTER_OVERFLOW; | ||
124 | |||
125 | setup_ebb_handler(pmc4_ebb_callee); | ||
126 | mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); | ||
127 | ebb_global_enable(); | ||
128 | |||
129 | overhead = determine_overhead(&event); | ||
130 | printf("Overhead of null loop: %lu instructions\n", overhead); | ||
131 | |||
132 | /* Run for 1M instructions */ | ||
133 | FAIL_IF(do_count_loop(&event, 0x100000, overhead, true)); | ||
134 | |||
135 | /* Run for 10M instructions */ | ||
136 | FAIL_IF(do_count_loop(&event, 0xa00000, overhead, true)); | ||
137 | |||
138 | /* Run for 100M instructions */ | ||
139 | FAIL_IF(do_count_loop(&event, 0x6400000, overhead, true)); | ||
140 | |||
141 | /* Run for 1G instructions */ | ||
142 | FAIL_IF(do_count_loop(&event, 0x40000000, overhead, true)); | ||
143 | |||
144 | /* Run for 16G instructions */ | ||
145 | FAIL_IF(do_count_loop(&event, 0x400000000, overhead, true)); | ||
146 | |||
147 | /* Run for 64G instructions */ | ||
148 | FAIL_IF(do_count_loop(&event, 0x1000000000, overhead, true)); | ||
149 | |||
150 | /* Run for 128G instructions */ | ||
151 | FAIL_IF(do_count_loop(&event, 0x2000000000, overhead, true)); | ||
152 | |||
153 | ebb_global_disable(); | ||
154 | event_close(&event); | ||
155 | |||
156 | printf("Finished OK\n"); | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | int main(void) | ||
162 | { | ||
163 | return test_harness(instruction_count, "instruction_count"); | ||
164 | } | ||
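The tolerance check in do_count_loop() is integer arithmetic for "error below 0.0001%": scaling |difference| by 10000 * 100 = 10^6 and then integer-dividing by the measured value gives a non-zero result exactly when |difference| / value >= 10^-6. An equivalent, more explicit form of that test (an illustration, not code from the patch; within_tolerance is a made-up name):

    /* Equivalent form of the 0.0001% tolerance check in do_count_loop(). */
    static int within_tolerance(int64_t difference, int64_t value)
    {
            if (difference < 0)
                    difference = -difference;
            /* 0.0001% is one part in 10^6: fail when |diff| * 10^6 >= value */
            return (difference * 10000 * 100) / value == 0;
    }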
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c new file mode 100644 index 000000000000..0c9dd9b2e39d --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <sched.h> | ||
7 | #include <signal.h> | ||
8 | #include <stdio.h> | ||
9 | #include <stdlib.h> | ||
10 | #include <sys/mman.h> | ||
11 | |||
12 | #include "ebb.h" | ||
13 | |||
14 | |||
15 | /* | ||
16 | * Test that tries to trigger CPU_FTR_PMAO_BUG, a hardware defect where an | ||
17 | * exception triggers but we context switch before it is delivered, and so | ||
18 | * lose the exception. | ||
19 | */ | ||
20 | |||
21 | static int test_body(void) | ||
22 | { | ||
23 | int i, orig_period, max_period; | ||
24 | struct event event; | ||
25 | |||
26 | /* We use PMC4 to make sure the kernel switches all counters correctly */ | ||
27 | event_init_named(&event, 0x40002, "instructions"); | ||
28 | event_leader_ebb_init(&event); | ||
29 | |||
30 | event.attr.exclude_kernel = 1; | ||
31 | event.attr.exclude_hv = 1; | ||
32 | event.attr.exclude_idle = 1; | ||
33 | |||
34 | FAIL_IF(event_open(&event)); | ||
35 | |||
36 | ebb_enable_pmc_counting(4); | ||
37 | setup_ebb_handler(standard_ebb_callee); | ||
38 | ebb_global_enable(); | ||
39 | FAIL_IF(ebb_event_enable(&event)); | ||
40 | |||
41 | /* | ||
42 | * We want a low sample period, but we also want to get out of the EBB | ||
43 | * handler without tripping up again. | ||
44 | * | ||
45 | * This value picked after much experimentation. | ||
46 | */ | ||
47 | orig_period = max_period = sample_period = 400; | ||
48 | |||
49 | mtspr(SPRN_PMC4, pmc_sample_period(sample_period)); | ||
50 | |||
51 | while (ebb_state.stats.ebb_count < 1000000) { | ||
52 | /* | ||
53 | * We are trying to get the EBB exception to race exactly with | ||
54 | * us entering the kernel to do the syscall. We then need the | ||
55 | * kernel to decide our timeslice is up and context switch to | ||
56 | * the other thread. When we come back our EBB will have been | ||
57 | * lost and we'll spin in this while loop forever. | ||
58 | */ | ||
59 | |||
60 | for (i = 0; i < 100000; i++) | ||
61 | sched_yield(); | ||
62 | |||
63 | /* Change the sample period slightly to try and hit the race */ | ||
64 | if (sample_period >= (orig_period + 200)) | ||
65 | sample_period = orig_period; | ||
66 | else | ||
67 | sample_period++; | ||
68 | |||
69 | if (sample_period > max_period) | ||
70 | max_period = sample_period; | ||
71 | } | ||
72 | |||
73 | ebb_freeze_pmcs(); | ||
74 | ebb_global_disable(); | ||
75 | |||
76 | count_pmc(4, sample_period); | ||
77 | mtspr(SPRN_PMC4, 0xdead); | ||
78 | |||
79 | dump_summary_ebb_state(); | ||
80 | dump_ebb_hw_state(); | ||
81 | |||
82 | event_close(&event); | ||
83 | |||
84 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
85 | |||
86 | /* We vary our sample period so we need extra fudge here */ | ||
87 | FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period))); | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static int lost_exception(void) | ||
93 | { | ||
94 | return eat_cpu(test_body); | ||
95 | } | ||
96 | |||
97 | int main(void) | ||
98 | { | ||
99 | return test_harness(lost_exception, "lost_exception"); | ||
100 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c new file mode 100644 index 000000000000..67d78af3284c --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | #include <sys/ioctl.h> | ||
9 | |||
10 | #include "ebb.h" | ||
11 | |||
12 | |||
13 | /* | ||
14 | * Test counting multiple events using EBBs. | ||
15 | */ | ||
16 | int multi_counter(void) | ||
17 | { | ||
18 | struct event events[6]; | ||
19 | int i, group_fd; | ||
20 | |||
21 | event_init_named(&events[0], 0x1001C, "PM_CMPLU_STALL_THRD"); | ||
22 | event_init_named(&events[1], 0x2D016, "PM_CMPLU_STALL_FXU"); | ||
23 | event_init_named(&events[2], 0x30006, "PM_CMPLU_STALL_OTHER_CMPL"); | ||
24 | event_init_named(&events[3], 0x4000A, "PM_CMPLU_STALL"); | ||
25 | event_init_named(&events[4], 0x600f4, "PM_RUN_CYC"); | ||
26 | event_init_named(&events[5], 0x500fa, "PM_RUN_INST_CMPL"); | ||
27 | |||
28 | event_leader_ebb_init(&events[0]); | ||
29 | for (i = 1; i < 6; i++) | ||
30 | event_ebb_init(&events[i]); | ||
31 | |||
32 | group_fd = -1; | ||
33 | for (i = 0; i < 6; i++) { | ||
34 | events[i].attr.exclude_kernel = 1; | ||
35 | events[i].attr.exclude_hv = 1; | ||
36 | events[i].attr.exclude_idle = 1; | ||
37 | |||
38 | FAIL_IF(event_open_with_group(&events[i], group_fd)); | ||
39 | if (group_fd == -1) | ||
40 | group_fd = events[0].fd; | ||
41 | } | ||
42 | |||
43 | ebb_enable_pmc_counting(1); | ||
44 | ebb_enable_pmc_counting(2); | ||
45 | ebb_enable_pmc_counting(3); | ||
46 | ebb_enable_pmc_counting(4); | ||
47 | ebb_enable_pmc_counting(5); | ||
48 | ebb_enable_pmc_counting(6); | ||
49 | setup_ebb_handler(standard_ebb_callee); | ||
50 | |||
51 | FAIL_IF(ioctl(events[0].fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP)); | ||
52 | FAIL_IF(event_read(&events[0])); | ||
53 | |||
54 | ebb_global_enable(); | ||
55 | |||
56 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
57 | mtspr(SPRN_PMC2, pmc_sample_period(sample_period)); | ||
58 | mtspr(SPRN_PMC3, pmc_sample_period(sample_period)); | ||
59 | mtspr(SPRN_PMC4, pmc_sample_period(sample_period)); | ||
60 | mtspr(SPRN_PMC5, pmc_sample_period(sample_period)); | ||
61 | mtspr(SPRN_PMC6, pmc_sample_period(sample_period)); | ||
62 | |||
63 | while (ebb_state.stats.ebb_count < 50) { | ||
64 | FAIL_IF(core_busy_loop()); | ||
65 | FAIL_IF(ebb_check_mmcr0()); | ||
66 | } | ||
67 | |||
68 | ebb_global_disable(); | ||
69 | ebb_freeze_pmcs(); | ||
70 | |||
71 | count_pmc(1, sample_period); | ||
72 | count_pmc(2, sample_period); | ||
73 | count_pmc(3, sample_period); | ||
74 | count_pmc(4, sample_period); | ||
75 | count_pmc(5, sample_period); | ||
76 | count_pmc(6, sample_period); | ||
77 | |||
78 | dump_ebb_state(); | ||
79 | |||
80 | for (i = 0; i < 6; i++) | ||
81 | event_close(&events[i]); | ||
82 | |||
83 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | int main(void) | ||
89 | { | ||
90 | return test_harness(multi_counter, "multi_counter"); | ||
91 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c new file mode 100644 index 000000000000..b8dc371f9338 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdbool.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <signal.h> | ||
10 | |||
11 | #include "ebb.h" | ||
12 | |||
13 | |||
14 | /* | ||
15 | * Test running multiple EBB-using processes at once on a single CPU. They | ||
16 | * should all run happily without interfering with each other. | ||
17 | */ | ||
18 | |||
19 | static bool child_should_exit; | ||
20 | |||
21 | static void sigint_handler(int signal) | ||
22 | { | ||
23 | child_should_exit = true; | ||
24 | } | ||
25 | |||
26 | struct sigaction sigint_action = { | ||
27 | .sa_handler = sigint_handler, | ||
28 | }; | ||
29 | |||
30 | static int cycles_child(void) | ||
31 | { | ||
32 | struct event event; | ||
33 | |||
34 | if (sigaction(SIGINT, &sigint_action, NULL)) { | ||
35 | perror("sigaction"); | ||
36 | return 1; | ||
37 | } | ||
38 | |||
39 | event_init_named(&event, 0x1001e, "cycles"); | ||
40 | event_leader_ebb_init(&event); | ||
41 | |||
42 | event.attr.exclude_kernel = 1; | ||
43 | event.attr.exclude_hv = 1; | ||
44 | event.attr.exclude_idle = 1; | ||
45 | |||
46 | FAIL_IF(event_open(&event)); | ||
47 | |||
48 | ebb_enable_pmc_counting(1); | ||
49 | setup_ebb_handler(standard_ebb_callee); | ||
50 | ebb_global_enable(); | ||
51 | |||
52 | FAIL_IF(ebb_event_enable(&event)); | ||
53 | |||
54 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
55 | |||
56 | while (!child_should_exit) { | ||
57 | FAIL_IF(core_busy_loop()); | ||
58 | FAIL_IF(ebb_check_mmcr0()); | ||
59 | } | ||
60 | |||
61 | ebb_global_disable(); | ||
62 | ebb_freeze_pmcs(); | ||
63 | |||
64 | count_pmc(1, sample_period); | ||
65 | |||
66 | dump_summary_ebb_state(); | ||
67 | |||
68 | event_close(&event); | ||
69 | |||
70 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | #define NR_CHILDREN 4 | ||
76 | |||
77 | int multi_ebb_procs(void) | ||
78 | { | ||
79 | pid_t pids[NR_CHILDREN]; | ||
80 | int cpu, rc, i; | ||
81 | |||
82 | cpu = pick_online_cpu(); | ||
83 | FAIL_IF(cpu < 0); | ||
84 | FAIL_IF(bind_to_cpu(cpu)); | ||
85 | |||
86 | for (i = 0; i < NR_CHILDREN; i++) { | ||
87 | pids[i] = fork(); | ||
88 | if (pids[i] == 0) | ||
89 | exit(cycles_child()); | ||
90 | } | ||
91 | |||
92 | /* Have them all run for "a while" */ | ||
93 | sleep(10); | ||
94 | |||
95 | rc = 0; | ||
96 | for (i = 0; i < NR_CHILDREN; i++) { | ||
97 | /* Tell them to stop */ | ||
98 | kill(pids[i], SIGINT); | ||
99 | /* And wait */ | ||
100 | rc |= wait_for_child(pids[i]); | ||
101 | } | ||
102 | |||
103 | return rc; | ||
104 | } | ||
105 | |||
106 | int main(void) | ||
107 | { | ||
108 | return test_harness(multi_ebb_procs, "multi_ebb_procs"); | ||
109 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c new file mode 100644 index 000000000000..2f9bf8edfa60 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | #include <setjmp.h> | ||
9 | #include <signal.h> | ||
10 | |||
11 | #include "ebb.h" | ||
12 | |||
13 | |||
14 | /* Test that things work sanely if we have no handler */ | ||
15 | |||
16 | static int no_handler_test(void) | ||
17 | { | ||
18 | struct event event; | ||
19 | u64 val; | ||
20 | int i; | ||
21 | |||
22 | event_init_named(&event, 0x1001e, "cycles"); | ||
23 | event_leader_ebb_init(&event); | ||
24 | |||
25 | event.attr.exclude_kernel = 1; | ||
26 | event.attr.exclude_hv = 1; | ||
27 | event.attr.exclude_idle = 1; | ||
28 | |||
29 | FAIL_IF(event_open(&event)); | ||
30 | FAIL_IF(ebb_event_enable(&event)); | ||
31 | |||
32 | val = mfspr(SPRN_EBBHR); | ||
33 | FAIL_IF(val != 0); | ||
34 | |||
35 | /* Make sure it overflows quickly */ | ||
36 | sample_period = 1000; | ||
37 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
38 | |||
39 | /* Spin to make sure the event has time to overflow */ | ||
40 | for (i = 0; i < 1000; i++) | ||
41 | mb(); | ||
42 | |||
43 | dump_ebb_state(); | ||
44 | |||
45 | /* We expect to see the PMU frozen & PMAO set */ | ||
46 | val = mfspr(SPRN_MMCR0); | ||
47 | FAIL_IF(val != 0x0000000080000080); | ||
48 | |||
49 | event_close(&event); | ||
50 | |||
51 | dump_ebb_state(); | ||
52 | |||
53 | /* The real test is that we never took an EBB at 0x0 */ | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | int main(void) | ||
59 | { | ||
60 | return test_harness(no_handler_test,"no_handler_test"); | ||
61 | } | ||
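The magic value compared against MMCR0 above, 0x0000000080000080, is MMCR0_FC | MMCR0_PMAO using the constants from reg.h later in this patch: the PMU has frozen itself and a PMU alert occurred, but it was never delivered as an EBB since the test never enables delivery in BESCR. The same check written with the named constants (illustration only):

    /* Equivalent check using the reg.h constants. */
    val = mfspr(SPRN_MMCR0);
    FAIL_IF(val != (MMCR0_FC | MMCR0_PMAO));      /* frozen + alert occurred */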
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c new file mode 100644 index 000000000000..986500fd2131 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <sched.h> | ||
7 | #include <signal.h> | ||
8 | #include <stdbool.h> | ||
9 | #include <stdio.h> | ||
10 | #include <stdlib.h> | ||
11 | |||
12 | #include "ebb.h" | ||
13 | |||
14 | |||
15 | /* | ||
16 | * Test that the kernel properly handles PMAE across context switches. | ||
17 | * | ||
18 | * We test this by calling into the kernel inside our EBB handler, where PMAE | ||
19 | * is clear. A cpu eater companion thread is running on the same CPU as us to | ||
20 | * encourage the scheduler to switch us. | ||
21 | * | ||
22 | * The kernel must make sure that when it context switches us back in, it | ||
23 | * honours the fact that we had PMAE clear. | ||
24 | * | ||
25 | * Observed to hit the failing case on the first EBB with a broken kernel. | ||
26 | */ | ||
27 | |||
28 | static bool mmcr0_mismatch; | ||
29 | static uint64_t before, after; | ||
30 | |||
31 | static void syscall_ebb_callee(void) | ||
32 | { | ||
33 | uint64_t val; | ||
34 | |||
35 | val = mfspr(SPRN_BESCR); | ||
36 | if (!(val & BESCR_PMEO)) { | ||
37 | ebb_state.stats.spurious++; | ||
38 | goto out; | ||
39 | } | ||
40 | |||
41 | ebb_state.stats.ebb_count++; | ||
42 | count_pmc(1, sample_period); | ||
43 | |||
44 | before = mfspr(SPRN_MMCR0); | ||
45 | |||
46 | /* Try and get ourselves scheduled, to force a PMU context switch */ | ||
47 | sched_yield(); | ||
48 | |||
49 | after = mfspr(SPRN_MMCR0); | ||
50 | if (before != after) | ||
51 | mmcr0_mismatch = true; | ||
52 | |||
53 | out: | ||
54 | reset_ebb(); | ||
55 | } | ||
56 | |||
57 | static int test_body(void) | ||
58 | { | ||
59 | struct event event; | ||
60 | |||
61 | event_init_named(&event, 0x1001e, "cycles"); | ||
62 | event_leader_ebb_init(&event); | ||
63 | |||
64 | event.attr.exclude_kernel = 1; | ||
65 | event.attr.exclude_hv = 1; | ||
66 | event.attr.exclude_idle = 1; | ||
67 | |||
68 | FAIL_IF(event_open(&event)); | ||
69 | |||
70 | setup_ebb_handler(syscall_ebb_callee); | ||
71 | ebb_global_enable(); | ||
72 | |||
73 | FAIL_IF(ebb_event_enable(&event)); | ||
74 | |||
75 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
76 | |||
77 | while (ebb_state.stats.ebb_count < 20 && !mmcr0_mismatch) | ||
78 | FAIL_IF(core_busy_loop()); | ||
79 | |||
80 | ebb_global_disable(); | ||
81 | ebb_freeze_pmcs(); | ||
82 | |||
83 | count_pmc(1, sample_period); | ||
84 | |||
85 | dump_ebb_state(); | ||
86 | |||
87 | if (mmcr0_mismatch) | ||
88 | printf("Saw MMCR0 before 0x%lx after 0x%lx\n", before, after); | ||
89 | |||
90 | event_close(&event); | ||
91 | |||
92 | FAIL_IF(ebb_state.stats.ebb_count == 0); | ||
93 | FAIL_IF(mmcr0_mismatch); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | int pmae_handling(void) | ||
99 | { | ||
100 | return eat_cpu(test_body); | ||
101 | } | ||
102 | |||
103 | int main(void) | ||
104 | { | ||
105 | return test_harness(pmae_handling, "pmae_handling"); | ||
106 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c new file mode 100644 index 000000000000..a503fa70c950 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | |||
9 | #include "ebb.h" | ||
10 | |||
11 | |||
12 | /* | ||
13 | * Test that PMC5 & 6 are frozen (i.e. don't overflow) when they are not being | ||
14 | * used. Tests the MMCR0_FC56 logic in the kernel. | ||
15 | */ | ||
16 | |||
17 | static int pmc56_overflowed; | ||
18 | |||
19 | static void ebb_callee(void) | ||
20 | { | ||
21 | uint64_t val; | ||
22 | |||
23 | val = mfspr(SPRN_BESCR); | ||
24 | if (!(val & BESCR_PMEO)) { | ||
25 | ebb_state.stats.spurious++; | ||
26 | goto out; | ||
27 | } | ||
28 | |||
29 | ebb_state.stats.ebb_count++; | ||
30 | count_pmc(2, sample_period); | ||
31 | |||
32 | val = mfspr(SPRN_PMC5); | ||
33 | if (val >= COUNTER_OVERFLOW) | ||
34 | pmc56_overflowed++; | ||
35 | |||
36 | count_pmc(5, COUNTER_OVERFLOW); | ||
37 | |||
38 | val = mfspr(SPRN_PMC6); | ||
39 | if (val >= COUNTER_OVERFLOW) | ||
40 | pmc56_overflowed++; | ||
41 | |||
42 | count_pmc(6, COUNTER_OVERFLOW); | ||
43 | |||
44 | out: | ||
45 | reset_ebb(); | ||
46 | } | ||
47 | |||
48 | int pmc56_overflow(void) | ||
49 | { | ||
50 | struct event event; | ||
51 | |||
52 | /* Use PMC2 so we set PMCjCE, which enables PMC5/6 */ | ||
53 | event_init(&event, 0x2001e); | ||
54 | event_leader_ebb_init(&event); | ||
55 | |||
56 | event.attr.exclude_kernel = 1; | ||
57 | event.attr.exclude_hv = 1; | ||
58 | event.attr.exclude_idle = 1; | ||
59 | |||
60 | FAIL_IF(event_open(&event)); | ||
61 | |||
62 | setup_ebb_handler(ebb_callee); | ||
63 | ebb_global_enable(); | ||
64 | |||
65 | FAIL_IF(ebb_event_enable(&event)); | ||
66 | |||
67 | mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); | ||
68 | mtspr(SPRN_PMC5, 0); | ||
69 | mtspr(SPRN_PMC6, 0); | ||
70 | |||
71 | while (ebb_state.stats.ebb_count < 10) | ||
72 | FAIL_IF(core_busy_loop()); | ||
73 | |||
74 | ebb_global_disable(); | ||
75 | ebb_freeze_pmcs(); | ||
76 | |||
77 | count_pmc(2, sample_period); | ||
78 | |||
79 | dump_ebb_state(); | ||
80 | |||
81 | printf("PMC5/6 overflow %d\n", pmc56_overflowed); | ||
82 | |||
83 | event_close(&event); | ||
84 | |||
85 | FAIL_IF(ebb_state.stats.ebb_count == 0 || pmc56_overflowed != 0); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | int main(void) | ||
91 | { | ||
92 | return test_harness(pmc56_overflow, "pmc56_overflow"); | ||
93 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/reg.h b/tools/testing/selftests/powerpc/pmu/ebb/reg.h new file mode 100644 index 000000000000..5921b0dfe2e9 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/reg.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #ifndef _SELFTESTS_POWERPC_REG_H | ||
7 | #define _SELFTESTS_POWERPC_REG_H | ||
8 | |||
9 | #define __stringify_1(x) #x | ||
10 | #define __stringify(x) __stringify_1(x) | ||
11 | |||
12 | #define mfspr(rn) ({unsigned long rval; \ | ||
13 | asm volatile("mfspr %0," __stringify(rn) \ | ||
14 | : "=r" (rval)); rval; }) | ||
15 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \ | ||
16 | : "r" ((unsigned long)(v)) \ | ||
17 | : "memory") | ||
18 | |||
19 | #define mb() asm volatile("sync" : : : "memory"); | ||
20 | |||
21 | #define SPRN_MMCR2 769 | ||
22 | #define SPRN_MMCRA 770 | ||
23 | #define SPRN_MMCR0 779 | ||
24 | #define MMCR0_PMAO 0x00000080 | ||
25 | #define MMCR0_PMAE 0x04000000 | ||
26 | #define MMCR0_FC 0x80000000 | ||
27 | #define SPRN_EBBHR 804 | ||
28 | #define SPRN_EBBRR 805 | ||
29 | #define SPRN_BESCR 806 /* Branch event status & control register */ | ||
30 | #define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */ | ||
31 | #define SPRN_BESCRSU 801 /* Branch event status & control set upper */ | ||
32 | #define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */ | ||
33 | #define SPRN_BESCRRU 803 /* Branch event status & control REset upper */ | ||
34 | |||
35 | #define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */ | ||
36 | #define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */ | ||
37 | |||
38 | #define SPRN_PMC1 771 | ||
39 | #define SPRN_PMC2 772 | ||
40 | #define SPRN_PMC3 773 | ||
41 | #define SPRN_PMC4 774 | ||
42 | #define SPRN_PMC5 775 | ||
43 | #define SPRN_PMC6 776 | ||
44 | |||
45 | #define SPRN_SIAR 780 | ||
46 | #define SPRN_SDAR 781 | ||
47 | #define SPRN_SIER 768 | ||
48 | |||
49 | #endif /* _SELFTESTS_POWERPC_REG_H */ | ||
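Several of the tests seed a PMC with pmc_sample_period(sample_period) so that it overflows, and hence raises an EBB, after roughly sample_period events. The PMCs are 32-bit and the alert fires when the top bit becomes set, so the helper (which lives in ebb.c/ebb.h, not shown in this hunk) presumably just counts backwards from the overflow point -- a sketch under that assumption:

    /* Presumed sample-period helper; COUNTER_OVERFLOW matches its use in the tests. */
    #define COUNTER_OVERFLOW 0x80000000ull        /* PMC alert when bit 31 sets */

    static uint64_t pmc_sample_period(uint64_t period)
    {
            /* Seed so the counter reaches the overflow point after 'period' events */
            return COUNTER_OVERFLOW - period;
    }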
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c b/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c new file mode 100644 index 000000000000..0cae66f659a3 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <stdio.h> | ||
7 | #include <stdlib.h> | ||
8 | |||
9 | #include "ebb.h" | ||
10 | #include "reg.h" | ||
11 | |||
12 | |||
13 | /* | ||
14 | * Test basic access to the EBB regs; they should be user accessible with no | ||
15 | * kernel interaction required. | ||
16 | */ | ||
17 | int reg_access(void) | ||
18 | { | ||
19 | uint64_t val, expected; | ||
20 | |||
21 | expected = 0x8000000100000000ull; | ||
22 | mtspr(SPRN_BESCR, expected); | ||
23 | val = mfspr(SPRN_BESCR); | ||
24 | |||
25 | FAIL_IF(val != expected); | ||
26 | |||
27 | expected = 0x0000000001000000ull; | ||
28 | mtspr(SPRN_EBBHR, expected); | ||
29 | val = mfspr(SPRN_EBBHR); | ||
30 | |||
31 | FAIL_IF(val != expected); | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | int main(void) | ||
37 | { | ||
38 | return test_harness(reg_access, "reg_access"); | ||
39 | } | ||
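The two patterns written above are not arbitrary bit soup: 0x8000000100000000 is the BESCR global-enable bit (bit 63, an assumption, as it is not named in reg.h) plus BESCR_PME (bit 32), while the EBBHR value is simply a recognisable test constant. The BESCR write could equally be expressed with named bits (illustration only):

    /* Same BESCR pattern built from named bits. */
    expected = (1ull << 63) | BESCR_PME;          /* == 0x8000000100000000 */
    mtspr(SPRN_BESCR, expected);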
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c new file mode 100644 index 000000000000..d56607e4ffab --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests a pinned per-task event vs an EBB - in that order. The pinned per-task | ||
19 | * event should prevent the EBB event from being enabled. | ||
20 | */ | ||
21 | |||
22 | static int setup_child_event(struct event *event, pid_t child_pid) | ||
23 | { | ||
24 | event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); | ||
25 | |||
26 | event->attr.pinned = 1; | ||
27 | |||
28 | event->attr.exclude_kernel = 1; | ||
29 | event->attr.exclude_hv = 1; | ||
30 | event->attr.exclude_idle = 1; | ||
31 | |||
32 | FAIL_IF(event_open_with_pid(event, child_pid)); | ||
33 | FAIL_IF(event_enable(event)); | ||
34 | |||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | int task_event_pinned_vs_ebb(void) | ||
39 | { | ||
40 | union pipe read_pipe, write_pipe; | ||
41 | struct event event; | ||
42 | pid_t pid; | ||
43 | int rc; | ||
44 | |||
45 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
46 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
47 | |||
48 | pid = fork(); | ||
49 | if (pid == 0) { | ||
50 | /* NB order of pipes looks reversed */ | ||
51 | exit(ebb_child(write_pipe, read_pipe)); | ||
52 | } | ||
53 | |||
54 | /* We setup the task event first */ | ||
55 | rc = setup_child_event(&event, pid); | ||
56 | if (rc) { | ||
57 | kill_child_and_wait(pid); | ||
58 | return rc; | ||
59 | } | ||
60 | |||
61 | /* Signal the child to install its EBB event and wait */ | ||
62 | if (sync_with_child(read_pipe, write_pipe)) | ||
63 | /* If it fails, wait for it to exit */ | ||
64 | goto wait; | ||
65 | |||
66 | /* Signal the child to run */ | ||
67 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
68 | |||
69 | wait: | ||
70 | /* We expect it to fail to read the event */ | ||
71 | FAIL_IF(wait_for_child(pid) != 2); | ||
72 | FAIL_IF(event_disable(&event)); | ||
73 | FAIL_IF(event_read(&event)); | ||
74 | |||
75 | event_report(&event); | ||
76 | |||
77 | FAIL_IF(event.result.value == 0); | ||
78 | /* | ||
79 | * For reasons I don't understand enabled is usually just slightly | ||
80 | * lower than running. Would be good to confirm why. | ||
81 | */ | ||
82 | FAIL_IF(event.result.enabled == 0); | ||
83 | FAIL_IF(event.result.running == 0); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | int main(void) | ||
89 | { | ||
90 | return test_harness(task_event_pinned_vs_ebb, "task_event_pinned_vs_ebb"); | ||
91 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c new file mode 100644 index 000000000000..eba32196dbbf --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <signal.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <stdbool.h> | ||
10 | #include <sys/types.h> | ||
11 | #include <sys/wait.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | #include "ebb.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * Tests a per-task event vs an EBB - in that order. The EBB should push the | ||
19 | * per-task event off the PMU. | ||
20 | */ | ||
21 | |||
22 | static int setup_child_event(struct event *event, pid_t child_pid) | ||
23 | { | ||
24 | event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); | ||
25 | |||
26 | event->attr.exclude_kernel = 1; | ||
27 | event->attr.exclude_hv = 1; | ||
28 | event->attr.exclude_idle = 1; | ||
29 | |||
30 | FAIL_IF(event_open_with_pid(event, child_pid)); | ||
31 | FAIL_IF(event_enable(event)); | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | int task_event_vs_ebb(void) | ||
37 | { | ||
38 | union pipe read_pipe, write_pipe; | ||
39 | struct event event; | ||
40 | pid_t pid; | ||
41 | int rc; | ||
42 | |||
43 | FAIL_IF(pipe(read_pipe.fds) == -1); | ||
44 | FAIL_IF(pipe(write_pipe.fds) == -1); | ||
45 | |||
46 | pid = fork(); | ||
47 | if (pid == 0) { | ||
48 | /* NB order of pipes looks reversed */ | ||
49 | exit(ebb_child(write_pipe, read_pipe)); | ||
50 | } | ||
51 | |||
52 | /* We setup the task event first */ | ||
53 | rc = setup_child_event(&event, pid); | ||
54 | if (rc) { | ||
55 | kill_child_and_wait(pid); | ||
56 | return rc; | ||
57 | } | ||
58 | |||
59 | /* Signal the child to install its EBB event and wait */ | ||
60 | if (sync_with_child(read_pipe, write_pipe)) | ||
61 | /* If it fails, wait for it to exit */ | ||
62 | goto wait; | ||
63 | |||
64 | /* Signal the child to run */ | ||
65 | FAIL_IF(sync_with_child(read_pipe, write_pipe)); | ||
66 | |||
67 | wait: | ||
68 | /* The EBB event should push the task event off so the child should succeed */ | ||
69 | FAIL_IF(wait_for_child(pid)); | ||
70 | FAIL_IF(event_disable(&event)); | ||
71 | FAIL_IF(event_read(&event)); | ||
72 | |||
73 | event_report(&event); | ||
74 | |||
75 | /* The task event may have run, or not so we can't assert anything about it */ | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | int main(void) | ||
81 | { | ||
82 | return test_harness(task_event_vs_ebb, "task_event_vs_ebb"); | ||
83 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.c b/tools/testing/selftests/powerpc/pmu/ebb/trace.c new file mode 100644 index 000000000000..251e66ab2aa7 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.c | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #include <errno.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <string.h> | ||
10 | #include <sys/mman.h> | ||
11 | |||
12 | #include "trace.h" | ||
13 | |||
14 | |||
15 | struct trace_buffer *trace_buffer_allocate(u64 size) | ||
16 | { | ||
17 | struct trace_buffer *tb; | ||
18 | |||
19 | if (size < sizeof(*tb)) { | ||
20 | fprintf(stderr, "Error: trace buffer too small\n"); | ||
21 | return NULL; | ||
22 | } | ||
23 | |||
24 | tb = mmap(NULL, size, PROT_READ | PROT_WRITE, | ||
25 | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); | ||
26 | if (tb == MAP_FAILED) { | ||
27 | perror("mmap"); | ||
28 | return NULL; | ||
29 | } | ||
30 | |||
31 | tb->size = size; | ||
32 | tb->tail = tb->data; | ||
33 | tb->overflow = false; | ||
34 | |||
35 | return tb; | ||
36 | } | ||
37 | |||
38 | static bool trace_check_bounds(struct trace_buffer *tb, void *p) | ||
39 | { | ||
40 | return p < ((void *)tb + tb->size); | ||
41 | } | ||
42 | |||
43 | static bool trace_check_alloc(struct trace_buffer *tb, void *p) | ||
44 | { | ||
45 | /* | ||
46 | * If we ever overflowed don't allow any more input. This prevents us | ||
47 | * from dropping a large item and then later logging a small one. The | ||
48 | * buffer should just stop when overflow happened, not be patchy. If | ||
49 | * you're overflowing, make your buffer bigger. | ||
50 | */ | ||
51 | if (tb->overflow) | ||
52 | return false; | ||
53 | |||
54 | if (!trace_check_bounds(tb, p)) { | ||
55 | tb->overflow = true; | ||
56 | return false; | ||
57 | } | ||
58 | |||
59 | return true; | ||
60 | } | ||
61 | |||
62 | static void *trace_alloc(struct trace_buffer *tb, int bytes) | ||
63 | { | ||
64 | void *p, *newtail; | ||
65 | |||
66 | p = tb->tail; | ||
67 | newtail = tb->tail + bytes; | ||
68 | if (!trace_check_alloc(tb, newtail)) | ||
69 | return NULL; | ||
70 | |||
71 | tb->tail = newtail; | ||
72 | |||
73 | return p; | ||
74 | } | ||
75 | |||
76 | static struct trace_entry *trace_alloc_entry(struct trace_buffer *tb, int payload_size) | ||
77 | { | ||
78 | struct trace_entry *e; | ||
79 | |||
80 | e = trace_alloc(tb, sizeof(*e) + payload_size); | ||
81 | if (e) | ||
82 | e->length = payload_size; | ||
83 | |||
84 | return e; | ||
85 | } | ||
86 | |||
87 | int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value) | ||
88 | { | ||
89 | struct trace_entry *e; | ||
90 | u64 *p; | ||
91 | |||
92 | e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value)); | ||
93 | if (!e) | ||
94 | return -ENOSPC; | ||
95 | |||
96 | e->type = TRACE_TYPE_REG; | ||
97 | p = (u64 *)e->data; | ||
98 | *p++ = reg; | ||
99 | *p++ = value; | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | int trace_log_counter(struct trace_buffer *tb, u64 value) | ||
105 | { | ||
106 | struct trace_entry *e; | ||
107 | u64 *p; | ||
108 | |||
109 | e = trace_alloc_entry(tb, sizeof(value)); | ||
110 | if (!e) | ||
111 | return -ENOSPC; | ||
112 | |||
113 | e->type = TRACE_TYPE_COUNTER; | ||
114 | p = (u64 *)e->data; | ||
115 | *p++ = value; | ||
116 | |||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | int trace_log_string(struct trace_buffer *tb, char *str) | ||
121 | { | ||
122 | struct trace_entry *e; | ||
123 | char *p; | ||
124 | int len; | ||
125 | |||
126 | len = strlen(str); | ||
127 | |||
128 | /* We NULL terminate to make printing easier */ | ||
129 | e = trace_alloc_entry(tb, len + 1); | ||
130 | if (!e) | ||
131 | return -ENOSPC; | ||
132 | |||
133 | e->type = TRACE_TYPE_STRING; | ||
134 | p = (char *)e->data; | ||
135 | memcpy(p, str, len); | ||
136 | p += len; | ||
137 | *p = '\0'; | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | int trace_log_indent(struct trace_buffer *tb) | ||
143 | { | ||
144 | struct trace_entry *e; | ||
145 | |||
146 | e = trace_alloc_entry(tb, 0); | ||
147 | if (!e) | ||
148 | return -ENOSPC; | ||
149 | |||
150 | e->type = TRACE_TYPE_INDENT; | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | int trace_log_outdent(struct trace_buffer *tb) | ||
156 | { | ||
157 | struct trace_entry *e; | ||
158 | |||
159 | e = trace_alloc_entry(tb, 0); | ||
160 | if (!e) | ||
161 | return -ENOSPC; | ||
162 | |||
163 | e->type = TRACE_TYPE_OUTDENT; | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static void trace_print_header(int seq, int prefix) | ||
169 | { | ||
170 | printf("%*s[%d]: ", prefix, "", seq); | ||
171 | } | ||
172 | |||
173 | static char *trace_decode_reg(int reg) | ||
174 | { | ||
175 | switch (reg) { | ||
176 | case 769: return "SPRN_MMCR2"; break; | ||
177 | case 770: return "SPRN_MMCRA"; break; | ||
178 | case 779: return "SPRN_MMCR0"; break; | ||
179 | case 804: return "SPRN_EBBHR"; break; | ||
180 | case 805: return "SPRN_EBBRR"; break; | ||
181 | case 806: return "SPRN_BESCR"; break; | ||
182 | case 800: return "SPRN_BESCRS"; break; | ||
183 | case 801: return "SPRN_BESCRSU"; break; | ||
184 | case 802: return "SPRN_BESCRR"; break; | ||
185 | case 803: return "SPRN_BESCRRU"; break; | ||
186 | case 771: return "SPRN_PMC1"; break; | ||
187 | case 772: return "SPRN_PMC2"; break; | ||
188 | case 773: return "SPRN_PMC3"; break; | ||
189 | case 774: return "SPRN_PMC4"; break; | ||
190 | case 775: return "SPRN_PMC5"; break; | ||
191 | case 776: return "SPRN_PMC6"; break; | ||
192 | case 780: return "SPRN_SIAR"; break; | ||
193 | case 781: return "SPRN_SDAR"; break; | ||
194 | case 768: return "SPRN_SIER"; break; | ||
195 | } | ||
196 | |||
197 | return NULL; | ||
198 | } | ||
199 | |||
200 | static void trace_print_reg(struct trace_entry *e) | ||
201 | { | ||
202 | u64 *p, *reg, *value; | ||
203 | char *name; | ||
204 | |||
205 | p = (u64 *)e->data; | ||
206 | reg = p++; | ||
207 | value = p; | ||
208 | |||
209 | name = trace_decode_reg(*reg); | ||
210 | if (name) | ||
211 | printf("register %-10s = 0x%016llx\n", name, *value); | ||
212 | else | ||
213 | printf("register %lld = 0x%016llx\n", *reg, *value); | ||
214 | } | ||
215 | |||
216 | static void trace_print_counter(struct trace_entry *e) | ||
217 | { | ||
218 | u64 *value; | ||
219 | |||
220 | value = (u64 *)e->data; | ||
221 | printf("counter = %lld\n", *value); | ||
222 | } | ||
223 | |||
224 | static void trace_print_string(struct trace_entry *e) | ||
225 | { | ||
226 | char *str; | ||
227 | |||
228 | str = (char *)e->data; | ||
229 | puts(str); | ||
230 | } | ||
231 | |||
232 | #define BASE_PREFIX 2 | ||
233 | #define PREFIX_DELTA 8 | ||
234 | |||
235 | static void trace_print_entry(struct trace_entry *e, int seq, int *prefix) | ||
236 | { | ||
237 | switch (e->type) { | ||
238 | case TRACE_TYPE_REG: | ||
239 | trace_print_header(seq, *prefix); | ||
240 | trace_print_reg(e); | ||
241 | break; | ||
242 | case TRACE_TYPE_COUNTER: | ||
243 | trace_print_header(seq, *prefix); | ||
244 | trace_print_counter(e); | ||
245 | break; | ||
246 | case TRACE_TYPE_STRING: | ||
247 | trace_print_header(seq, *prefix); | ||
248 | trace_print_string(e); | ||
249 | break; | ||
250 | case TRACE_TYPE_INDENT: | ||
251 | trace_print_header(seq, *prefix); | ||
252 | puts("{"); | ||
253 | *prefix += PREFIX_DELTA; | ||
254 | break; | ||
255 | case TRACE_TYPE_OUTDENT: | ||
256 | *prefix -= PREFIX_DELTA; | ||
257 | if (*prefix < BASE_PREFIX) | ||
258 | *prefix = BASE_PREFIX; | ||
259 | trace_print_header(seq, *prefix); | ||
260 | puts("}"); | ||
261 | break; | ||
262 | default: | ||
263 | trace_print_header(seq, *prefix); | ||
264 | printf("entry @ %p type %d\n", e, e->type); | ||
265 | break; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | void trace_buffer_print(struct trace_buffer *tb) | ||
270 | { | ||
271 | struct trace_entry *e; | ||
272 | int i, prefix; | ||
273 | void *p; | ||
274 | |||
275 | printf("Trace buffer dump:\n"); | ||
276 | printf(" address %p \n", tb); | ||
277 | printf(" tail %p\n", tb->tail); | ||
278 | printf(" size %llu\n", tb->size); | ||
279 | printf(" overflow %s\n", tb->overflow ? "TRUE" : "false"); | ||
280 | printf(" Content:\n"); | ||
281 | |||
282 | p = tb->data; | ||
283 | |||
284 | i = 0; | ||
285 | prefix = BASE_PREFIX; | ||
286 | |||
287 | while (trace_check_bounds(tb, p) && p < tb->tail) { | ||
288 | e = p; | ||
289 | |||
290 | trace_print_entry(e, i, &prefix); | ||
291 | |||
292 | i++; | ||
293 | p = (void *)e + sizeof(*e) + e->length; | ||
294 | } | ||
295 | } | ||
296 | |||
297 | void trace_print_location(struct trace_buffer *tb) | ||
298 | { | ||
299 | printf("Trace buffer 0x%llx bytes @ %p\n", tb->size, tb); | ||
300 | } | ||
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.h b/tools/testing/selftests/powerpc/pmu/ebb/trace.h new file mode 100644 index 000000000000..926458e28c8b --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #ifndef _SELFTESTS_POWERPC_PMU_EBB_TRACE_H | ||
7 | #define _SELFTESTS_POWERPC_PMU_EBB_TRACE_H | ||
8 | |||
9 | #include "utils.h" | ||
10 | |||
11 | #define TRACE_TYPE_REG 1 | ||
12 | #define TRACE_TYPE_COUNTER 2 | ||
13 | #define TRACE_TYPE_STRING 3 | ||
14 | #define TRACE_TYPE_INDENT 4 | ||
15 | #define TRACE_TYPE_OUTDENT 5 | ||
16 | |||
17 | struct trace_entry | ||
18 | { | ||
19 | u8 type; | ||
20 | u8 length; | ||
21 | u8 data[0]; | ||
22 | }; | ||
23 | |||
24 | struct trace_buffer | ||
25 | { | ||
26 | u64 size; | ||
27 | bool overflow; | ||
28 | void *tail; | ||
29 | u8 data[0]; | ||
30 | }; | ||
31 | |||
32 | struct trace_buffer *trace_buffer_allocate(u64 size); | ||
33 | int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value); | ||
34 | int trace_log_counter(struct trace_buffer *tb, u64 value); | ||
35 | int trace_log_string(struct trace_buffer *tb, char *str); | ||
36 | int trace_log_indent(struct trace_buffer *tb); | ||
37 | int trace_log_outdent(struct trace_buffer *tb); | ||
38 | void trace_buffer_print(struct trace_buffer *tb); | ||
39 | void trace_print_location(struct trace_buffer *tb); | ||
40 | |||
41 | #endif /* _SELFTESTS_POWERPC_PMU_EBB_TRACE_H */ | ||
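A minimal sketch of how a test might drive the trace API declared above — this is illustrative, not code from the patch. It assumes it is built in the ebb directory alongside trace.c with the usual harness include paths, that trace_buffer_allocate() returns NULL on failure, and the 4096-byte size and the SPR number 776 (decoded as "SPRN_PMC6" in trace_decode_reg() above) are arbitrary example values:

/* trace_usage_sketch.c - illustrative only, not part of the patch */
#include "trace.h"	/* assumes the ebb directory include paths */

int trace_sketch(void)
{
	struct trace_buffer *tb;
	char msg[] = "in the ebb handler";

	/* 4096 bytes is an arbitrary size for the sketch; the NULL check
	 * assumes the allocator reports failure that way. */
	tb = trace_buffer_allocate(4096);
	if (!tb)
		return 1;

	trace_log_string(tb, msg);
	trace_log_indent(tb);
	/* 776 decodes to "SPRN_PMC6" in trace_decode_reg() above */
	trace_log_reg(tb, 776, 0x8000000000000000ull);
	trace_log_counter(tb, 1);
	trace_log_outdent(tb);

	/* Dump every entry, indented by the INDENT/OUTDENT markers */
	trace_buffer_print(tb);
	trace_print_location(tb);

	return 0;
}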
diff --git a/tools/testing/selftests/powerpc/pmu/event.c b/tools/testing/selftests/powerpc/pmu/event.c index 2b2d11df2450..184b36807d48 100644 --- a/tools/testing/selftests/powerpc/pmu/event.c +++ b/tools/testing/selftests/powerpc/pmu/event.c | |||
@@ -39,7 +39,13 @@ void event_init_named(struct event *e, u64 config, char *name) | |||
39 | event_init_opts(e, config, PERF_TYPE_RAW, name); | 39 | event_init_opts(e, config, PERF_TYPE_RAW, name); |
40 | } | 40 | } |
41 | 41 | ||
42 | void event_init(struct event *e, u64 config) | ||
43 | { | ||
44 | event_init_opts(e, config, PERF_TYPE_RAW, "event"); | ||
45 | } | ||
46 | |||
42 | #define PERF_CURRENT_PID 0 | 47 | #define PERF_CURRENT_PID 0 |
48 | #define PERF_NO_PID -1 | ||
43 | #define PERF_NO_CPU -1 | 49 | #define PERF_NO_CPU -1 |
44 | #define PERF_NO_GROUP -1 | 50 | #define PERF_NO_GROUP -1 |
45 | 51 | ||
@@ -59,6 +65,16 @@ int event_open_with_group(struct event *e, int group_fd) | |||
59 | return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, group_fd); | 65 | return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, group_fd); |
60 | } | 66 | } |
61 | 67 | ||
68 | int event_open_with_pid(struct event *e, pid_t pid) | ||
69 | { | ||
70 | return event_open_with_options(e, pid, PERF_NO_CPU, PERF_NO_GROUP); | ||
71 | } | ||
72 | |||
73 | int event_open_with_cpu(struct event *e, int cpu) | ||
74 | { | ||
75 | return event_open_with_options(e, PERF_NO_PID, cpu, PERF_NO_GROUP); | ||
76 | } | ||
77 | |||
62 | int event_open(struct event *e) | 78 | int event_open(struct event *e) |
63 | { | 79 | { |
64 | return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, PERF_NO_GROUP); | 80 | return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, PERF_NO_GROUP); |
@@ -69,6 +85,16 @@ void event_close(struct event *e) | |||
69 | close(e->fd); | 85 | close(e->fd); |
70 | } | 86 | } |
71 | 87 | ||
88 | int event_enable(struct event *e) | ||
89 | { | ||
90 | return ioctl(e->fd, PERF_EVENT_IOC_ENABLE); | ||
91 | } | ||
92 | |||
93 | int event_disable(struct event *e) | ||
94 | { | ||
95 | return ioctl(e->fd, PERF_EVENT_IOC_DISABLE); | ||
96 | } | ||
97 | |||
72 | int event_reset(struct event *e) | 98 | int event_reset(struct event *e) |
73 | { | 99 | { |
74 | return ioctl(e->fd, PERF_EVENT_IOC_RESET); | 100 | return ioctl(e->fd, PERF_EVENT_IOC_RESET); |
diff --git a/tools/testing/selftests/powerpc/pmu/event.h b/tools/testing/selftests/powerpc/pmu/event.h index e6993192ff34..a0ea6b1eef73 100644 --- a/tools/testing/selftests/powerpc/pmu/event.h +++ b/tools/testing/selftests/powerpc/pmu/event.h | |||
@@ -29,8 +29,12 @@ void event_init_named(struct event *e, u64 config, char *name); | |||
29 | void event_init_opts(struct event *e, u64 config, int type, char *name); | 29 | void event_init_opts(struct event *e, u64 config, int type, char *name); |
30 | int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd); | 30 | int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd); |
31 | int event_open_with_group(struct event *e, int group_fd); | 31 | int event_open_with_group(struct event *e, int group_fd); |
32 | int event_open_with_pid(struct event *e, pid_t pid); | ||
33 | int event_open_with_cpu(struct event *e, int cpu); | ||
32 | int event_open(struct event *e); | 34 | int event_open(struct event *e); |
33 | void event_close(struct event *e); | 35 | void event_close(struct event *e); |
36 | int event_enable(struct event *e); | ||
37 | int event_disable(struct event *e); | ||
34 | int event_reset(struct event *e); | 38 | int event_reset(struct event *e); |
35 | int event_read(struct event *e); | 39 | int event_read(struct event *e); |
36 | void event_report_justified(struct event *e, int name_width, int result_width); | 40 | void event_report_justified(struct event *e, int name_width, int result_width); |
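The new event_open_with_pid()/event_open_with_cpu(), event_enable() and event_disable() helpers slot into the existing open/reset/read lifecycle. A hedged sketch of one way a test might combine them to count a raw event on a single CPU across all tasks; the raw config value 0x1001e and the report width arguments are placeholders, not values mandated by the patch:

/* event_cpu_sketch.c - illustrative only, not part of the patch */
#include "event.h"
#include "lib.h"	/* for pick_online_cpu() */

int count_on_one_cpu(void)
{
	struct event event;
	int cpu;

	cpu = pick_online_cpu();
	if (cpu < 0)
		return 1;

	/* 0x1001e is only an example raw event code */
	event_init(&event, 0x1001e);

	/* pid = PERF_NO_PID, so this counts all tasks, on one CPU */
	if (event_open_with_cpu(&event, cpu))
		return 1;

	event_reset(&event);
	event_enable(&event);

	/* ... the workload being measured would run here ... */

	event_disable(&event);

	if (event_read(&event))
		return 1;

	/* The width arguments are arbitrary formatting values */
	event_report_justified(&event, 18, 10);
	event_close(&event);

	return 0;
}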
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c new file mode 100644 index 000000000000..0f6a4731d546 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/lib.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #define _GNU_SOURCE /* For CPU_ZERO etc. */ | ||
7 | |||
8 | #include <errno.h> | ||
9 | #include <sched.h> | ||
10 | #include <setjmp.h> | ||
11 | #include <stdlib.h> | ||
12 | #include <sys/wait.h> | ||
13 | |||
14 | #include "utils.h" | ||
15 | #include "lib.h" | ||
16 | |||
17 | |||
18 | int pick_online_cpu(void) | ||
19 | { | ||
20 | cpu_set_t mask; | ||
21 | int cpu; | ||
22 | |||
23 | CPU_ZERO(&mask); | ||
24 | |||
25 | if (sched_getaffinity(0, sizeof(mask), &mask)) { | ||
26 | perror("sched_getaffinity"); | ||
27 | return -1; | ||
28 | } | ||
29 | |||
30 | /* We prefer a primary thread, but skip 0 */ | ||
31 | for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) | ||
32 | if (CPU_ISSET(cpu, &mask)) | ||
33 | return cpu; | ||
34 | |||
35 | /* Search for anything, but in reverse */ | ||
36 | for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) | ||
37 | if (CPU_ISSET(cpu, &mask)) | ||
38 | return cpu; | ||
39 | |||
40 | printf("No cpus in affinity mask?!\n"); | ||
41 | return -1; | ||
42 | } | ||
43 | |||
44 | int bind_to_cpu(int cpu) | ||
45 | { | ||
46 | cpu_set_t mask; | ||
47 | |||
48 | printf("Binding to cpu %d\n", cpu); | ||
49 | |||
50 | CPU_ZERO(&mask); | ||
51 | CPU_SET(cpu, &mask); | ||
52 | |||
53 | return sched_setaffinity(0, sizeof(mask), &mask); | ||
54 | } | ||
55 | |||
56 | #define PARENT_TOKEN 0xAA | ||
57 | #define CHILD_TOKEN 0x55 | ||
58 | |||
59 | int sync_with_child(union pipe read_pipe, union pipe write_pipe) | ||
60 | { | ||
61 | char c = PARENT_TOKEN; | ||
62 | |||
63 | FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); | ||
64 | FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1); | ||
65 | if (c != CHILD_TOKEN) /* sometimes expected */ | ||
66 | return 1; | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | int wait_for_parent(union pipe read_pipe) | ||
72 | { | ||
73 | char c; | ||
74 | |||
75 | FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1); | ||
76 | FAIL_IF(c != PARENT_TOKEN); | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | int notify_parent(union pipe write_pipe) | ||
82 | { | ||
83 | char c = CHILD_TOKEN; | ||
84 | |||
85 | FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | int notify_parent_of_error(union pipe write_pipe) | ||
91 | { | ||
92 | char c = ~CHILD_TOKEN; | ||
93 | |||
94 | FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | int wait_for_child(pid_t child_pid) | ||
100 | { | ||
101 | int rc; | ||
102 | |||
103 | if (waitpid(child_pid, &rc, 0) == -1) { | ||
104 | perror("waitpid"); | ||
105 | return 1; | ||
106 | } | ||
107 | |||
108 | if (WIFEXITED(rc)) | ||
109 | rc = WEXITSTATUS(rc); | ||
110 | else | ||
111 | rc = 1; /* Signal or other */ | ||
112 | |||
113 | return rc; | ||
114 | } | ||
115 | |||
116 | int kill_child_and_wait(pid_t child_pid) | ||
117 | { | ||
118 | kill(child_pid, SIGTERM); | ||
119 | |||
120 | return wait_for_child(child_pid); | ||
121 | } | ||
122 | |||
123 | static int eat_cpu_child(union pipe read_pipe, union pipe write_pipe) | ||
124 | { | ||
125 | volatile int i = 0; | ||
126 | |||
127 | /* | ||
128 | * We are just here to eat cpu and die. So make sure we can be killed, | ||
129 | * and also don't do any custom SIGTERM handling. | ||
130 | */ | ||
131 | signal(SIGTERM, SIG_DFL); | ||
132 | |||
133 | notify_parent(write_pipe); | ||
134 | wait_for_parent(read_pipe); | ||
135 | |||
136 | /* Soak up cpu forever */ | ||
137 | while (1) i++; | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | pid_t eat_cpu(int (test_function)(void)) | ||
143 | { | ||
144 | union pipe read_pipe, write_pipe; | ||
145 | int cpu, rc; | ||
146 | pid_t pid; | ||
147 | |||
148 | cpu = pick_online_cpu(); | ||
149 | FAIL_IF(cpu < 0); | ||
150 | FAIL_IF(bind_to_cpu(cpu)); | ||
151 | |||
152 | if (pipe(read_pipe.fds) == -1) | ||
153 | return -1; | ||
154 | |||
155 | if (pipe(write_pipe.fds) == -1) | ||
156 | return -1; | ||
157 | |||
158 | pid = fork(); | ||
159 | if (pid == 0) | ||
160 | exit(eat_cpu_child(write_pipe, read_pipe)); | ||
161 | |||
162 | if (sync_with_child(read_pipe, write_pipe)) { | ||
163 | rc = -1; | ||
164 | goto out; | ||
165 | } | ||
166 | |||
167 | printf("main test running as pid %d\n", getpid()); | ||
168 | |||
169 | rc = test_function(); | ||
170 | out: | ||
171 | kill(pid, SIGKILL); | ||
172 | |||
173 | return rc; | ||
174 | } | ||
175 | |||
176 | struct addr_range libc, vdso; | ||
177 | |||
178 | int parse_proc_maps(void) | ||
179 | { | ||
180 | char execute, name[128]; | ||
181 | uint64_t start, end; | ||
182 | FILE *f; | ||
183 | int rc; | ||
184 | |||
185 | f = fopen("/proc/self/maps", "r"); | ||
186 | if (!f) { | ||
187 | perror("fopen"); | ||
188 | return -1; | ||
189 | } | ||
190 | |||
191 | do { | ||
192 | /* This skips lines with no executable, which is what we want */ | ||
193 | rc = fscanf(f, "%lx-%lx %*c%*c%c%*c %*x %*d:%*d %*d %127s\n", | ||
194 | &start, &end, &execute, name); | ||
195 | if (rc <= 0) | ||
196 | break; | ||
197 | |||
198 | if (execute != 'x') | ||
199 | continue; | ||
200 | |||
201 | if (strstr(name, "libc")) { | ||
202 | libc.first = start; | ||
203 | libc.last = end - 1; | ||
204 | } else if (strstr(name, "[vdso]")) { | ||
205 | vdso.first = start; | ||
206 | vdso.last = end - 1; | ||
207 | } | ||
208 | } while(1); | ||
209 | |||
210 | fclose(f); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | #define PARANOID_PATH "/proc/sys/kernel/perf_event_paranoid" | ||
216 | |||
217 | bool require_paranoia_below(int level) | ||
218 | { | ||
219 | unsigned long current; | ||
220 | char *end, buf[16]; | ||
221 | FILE *f; | ||
222 | int rc; | ||
223 | |||
224 | rc = -1; | ||
225 | |||
226 | f = fopen(PARANOID_PATH, "r"); | ||
227 | if (!f) { | ||
228 | perror("fopen"); | ||
229 | goto out; | ||
230 | } | ||
231 | |||
232 | if (!fgets(buf, sizeof(buf), f)) { | ||
233 | printf("Couldn't read " PARANOID_PATH "?\n"); | ||
234 | goto out_close; | ||
235 | } | ||
236 | |||
237 | current = strtoul(buf, &end, 10); | ||
238 | |||
239 | if (end == buf) { | ||
240 | printf("Couldn't parse " PARANOID_PATH "?\n"); | ||
241 | goto out_close; | ||
242 | } | ||
243 | |||
244 | if (current >= level) | ||
245 | goto out; | ||
246 | |||
247 | rc = 0; | ||
248 | out_close: | ||
249 | fclose(f); | ||
250 | out: | ||
251 | return rc; | ||
252 | } | ||
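The pipe helpers above implement a simple two-token handshake: the child writes CHILD_TOKEN and blocks until the parent replies, while the parent's sync_with_child() writes PARENT_TOKEN and then waits for the child's token. A sketch of the shape a caller might take — child_fn() and the simplified error handling are invented for this illustration; the real tests use FAIL_IF() and run under the harness:

/* sync_sketch.c - illustrative only, not part of the patch */
#include <stdlib.h>
#include <unistd.h>

#include "lib.h"

/* Child side: check in with the parent, then wait to be released */
static int child_fn(union pipe read_pipe, union pipe write_pipe)
{
	if (notify_parent(write_pipe))
		return 1;

	if (wait_for_parent(read_pipe))
		return 1;

	/* ... the child half of the test would run here ... */

	return 0;
}

int parent_fn(void)
{
	union pipe read_pipe, write_pipe;
	pid_t pid;

	if (pipe(read_pipe.fds) == -1 || pipe(write_pipe.fds) == -1)
		return 1;

	pid = fork();
	if (pid == -1)
		return 1;

	if (pid == 0)
		/* The child's read pipe is the parent's write pipe, and
		 * vice versa, exactly as in eat_cpu() above. */
		exit(child_fn(write_pipe, read_pipe));

	/* Writes PARENT_TOKEN, then blocks until CHILD_TOKEN arrives */
	if (sync_with_child(read_pipe, write_pipe)) {
		kill_child_and_wait(pid);
		return 1;
	}

	/* ... the parent half of the test would run here ... */

	return kill_child_and_wait(pid);
}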
diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h new file mode 100644 index 000000000000..ca5d72ae3be6 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/lib.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2014, Michael Ellerman, IBM Corp. | ||
3 | * Licensed under GPLv2. | ||
4 | */ | ||
5 | |||
6 | #ifndef __SELFTESTS_POWERPC_PMU_LIB_H | ||
7 | #define __SELFTESTS_POWERPC_PMU_LIB_H | ||
8 | |||
9 | #include <stdio.h> | ||
10 | #include <stdint.h> | ||
11 | #include <string.h> | ||
12 | #include <unistd.h> | ||
13 | |||
14 | union pipe { | ||
15 | struct { | ||
16 | int read_fd; | ||
17 | int write_fd; | ||
18 | }; | ||
19 | int fds[2]; | ||
20 | }; | ||
21 | |||
22 | extern int pick_online_cpu(void); | ||
23 | extern int bind_to_cpu(int cpu); | ||
24 | extern int kill_child_and_wait(pid_t child_pid); | ||
25 | extern int wait_for_child(pid_t child_pid); | ||
26 | extern int sync_with_child(union pipe read_pipe, union pipe write_pipe); | ||
27 | extern int wait_for_parent(union pipe read_pipe); | ||
28 | extern int notify_parent(union pipe write_pipe); | ||
29 | extern int notify_parent_of_error(union pipe write_pipe); | ||
30 | extern pid_t eat_cpu(int (test_function)(void)); | ||
31 | extern bool require_paranoia_below(int level); | ||
32 | |||
33 | struct addr_range { | ||
34 | uint64_t first, last; | ||
35 | }; | ||
36 | |||
37 | extern struct addr_range libc, vdso; | ||
38 | |||
39 | int parse_proc_maps(void); | ||
40 | |||
41 | #endif /* __SELFTESTS_POWERPC_PMU_LIB_H */ | ||
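parse_proc_maps() walks /proc/self/maps and records the executable libc and VDSO mappings in the two addr_range globals. A sketch of how a test might consume them, for example to classify a sampled instruction address; addr_in_range() is a helper invented for this illustration, and the inclusive-bounds check follows from parse_proc_maps() storing end - 1 as .last:

/* addr_range_sketch.c - illustrative only, not part of the patch */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#include "lib.h"

/* Invented for this sketch; first/last are inclusive because
 * parse_proc_maps() stores end - 1 as .last. */
static bool addr_in_range(struct addr_range *r, uint64_t addr)
{
	return addr >= r->first && addr <= r->last;
}

int classify_address(uint64_t addr)
{
	/* Fills in the libc and vdso globals from the executable
	 * mappings in /proc/self/maps */
	if (parse_proc_maps())
		return 1;

	if (addr_in_range(&libc, addr))
		printf("0x%" PRIx64 " is in libc\n", addr);
	else if (addr_in_range(&vdso, addr))
		printf("0x%" PRIx64 " is in the VDSO\n", addr);
	else
		printf("0x%" PRIx64 " is somewhere else\n", addr);

	return 0;
}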