aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCyril Bur <cyrilbur@gmail.com>2016-09-23 02:18:13 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2016-10-04 05:10:09 -0400
commit65ca668f58a260e144621fd93a413da67635b999 (patch)
treea279400c17b19584e6b9bc3ecbe02d431e2b8ec1
parentd11994314b2bfe028bc39be24b44298787925160 (diff)
selftests/powerpc: Check for VSX preservation across userspace preemption
Ensure the kernel correctly switches VSX registers across preemption. VSX registers are all volatile, and despite the kernel preserving VSX across syscalls, it doesn't have to. Test that during interrupts and timeslices ending the VSX regs remain the same. Signed-off-by: Cyril Bur <cyrilbur@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_asm.S61
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_preempt.c147
-rw-r--r--tools/testing/selftests/powerpc/vsx_asm.h71
4 files changed, 283 insertions, 1 deletions
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index 5b88875d5955..a505b66d408a 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,4 +1,4 @@
1TEST_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal 1TEST_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
2 2
3all: $(TEST_PROGS) 3all: $(TEST_PROGS)
4 4
@@ -13,6 +13,9 @@ vmx_syscall: vmx_asm.S
13vmx_preempt: vmx_asm.S 13vmx_preempt: vmx_asm.S
14vmx_signal: vmx_asm.S 14vmx_signal: vmx_asm.S
15 15
16vsx_preempt: CFLAGS += -mvsx
17vsx_preempt: vsx_asm.S
18
16include ../../lib.mk 19include ../../lib.mk
17 20
18clean: 21clean:
diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S
new file mode 100644
index 000000000000..a110dd882d5e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/vsx_asm.S
@@ -0,0 +1,61 @@
1/*
2 * Copyright 2015, Cyril Bur, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include "../basic_asm.h"
11#include "../vsx_asm.h"
12
# long check_vsx(vector int *r3);
# This function wraps storing VSX regs to the end of an array and a
# call to a comparison function in C which boils down to a memcmp()
FUNC_START(check_vsx)
	PUSH_BASIC_STACK(32)
	std r3,STACK_FRAME_PARAM(0)(sp)		# save array base across calls
	addi r3, r3, 16 * 12 #Second half of array
	bl store_vsx				# dump live vs20-vs31 into second half
	ld r3,STACK_FRAME_PARAM(0)(sp)		# restore array base for the C compare
	bl vsx_memcmp				# C side compares first half vs stored half
	POP_BASIC_STACK(32)
	blr					# return value is vsx_memcmp()'s rc in r3
FUNC_END(check_vsx)
26
# int preempt_vsx(vector int *varray, int *threads_starting,
#                 int *running);
# On starting will (atomically) decrement threads_starting as a signal
# that the VSX have been loaded with varray. Will proceed to check the
# validity of the VSX registers while running is not zero.
FUNC_START(preempt_vsx)
	PUSH_BASIC_STACK(512)
	std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
	std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
	std r5,STACK_FRAME_PARAM(2)(sp) # int *running

	bl load_vsx			# load vs20-vs31 from varray
	nop

	sync
	# Atomic DEC of *threads_starting: lwarx/stwcx. retry loop
	ld r3,STACK_FRAME_PARAM(1)(sp)
1:	lwarx r4,0,r3
	addi r4,r4,-1
	stwcx. r4,0,r3
	bne- 1b

	# Main check loop: keep re-verifying the regs until *running == 0
	# or a mismatch is detected.
2:	ld r3,STACK_FRAME_PARAM(0)(sp)
	bl check_vsx
	nop
	cmpdi r3,0			# non-zero rc => regs changed, bail out
	bne 3f
	ld r4,STACK_FRAME_PARAM(2)(sp)
	ld r5,0(r4)			# reload *running each iteration
	cmpwi r5,0
	bne 2b

	# r3 still holds check_vsx()'s rc (0 on clean exit)
3:	POP_BASIC_STACK(512)
	blr
FUNC_END(preempt_vsx)
diff --git a/tools/testing/selftests/powerpc/math/vsx_preempt.c b/tools/testing/selftests/powerpc/math/vsx_preempt.c
new file mode 100644
index 000000000000..6387f03a0a6a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/vsx_preempt.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2015, Cyril Bur, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * This test attempts to see if the VSX registers change across preemption.
10 * There is no way to be sure preemption happened so this test just
11 * uses many threads and a long wait. As such, a successful test
12 * doesn't mean much but a failure is bad.
13 */
14
15#include <stdio.h>
16#include <string.h>
17#include <unistd.h>
18#include <sys/syscall.h>
19#include <sys/time.h>
20#include <sys/types.h>
21#include <sys/wait.h>
22#include <stdlib.h>
23#include <pthread.h>
24
25#include "utils.h"
26
27/* Time to wait for workers to get preempted (seconds) */
28#define PREEMPT_TIME 20
29/*
30 * Factor by which to multiply number of online CPUs for total number of
31 * worker threads
32 */
33#define THREAD_FACTOR 8
34
/*
 * Ensure there is twice the space needed for the 12 VSX regs being
 * checked (vs20-vs31)!  check_vsx() is going to use the other half as
 * space to put the live registers before calling vsx_memcmp()
 */
__thread vector int varray[24] = {
	{1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10,11,12},
	{13,14,15,16}, {17,18,19,20}, {21,22,23,24},
	{25,26,27,28}, {29,30,31,32}, {33,34,35,36},
	{37,38,39,40}, {41,42,43,44}, {45,46,47,48}
};

int threads_starting;	/* decremented (atomically, in asm) by each worker once loaded */
int running;		/* polled by workers; cleared by the main thread to stop them */

extern long preempt_vsx(vector int *varray, int *threads_starting, int *running);
51
52long vsx_memcmp(vector int *a) {
53 vector int zero = {0, 0, 0, 0};
54 int i;
55
56 FAIL_IF(a != varray);
57
58 for(i = 0; i < 12; i++) {
59 if (memcmp(&a[i + 12], &zero, sizeof(vector int)) == 0) {
60 fprintf(stderr, "Detected zero from the VSX reg %d\n", i + 12);
61 return 2;
62 }
63 }
64
65 if (memcmp(a, &a[12], 12 * sizeof(vector int))) {
66 long *p = (long *)a;
67 fprintf(stderr, "VSX mismatch\n");
68 for (i = 0; i < 24; i=i+2)
69 fprintf(stderr, "%d: 0x%08lx%08lx | 0x%08lx%08lx\n",
70 i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]);
71 return 1;
72 }
73 return 0;
74}
75
76void *preempt_vsx_c(void *p)
77{
78 int i, j;
79 long rc;
80 srand(pthread_self());
81 for (i = 0; i < 12; i++)
82 for (j = 0; j < 4; j++) {
83 varray[i][j] = rand();
84 /* Don't want zero because it hides kernel problems */
85 if (varray[i][j] == 0)
86 j--;
87 }
88 rc = preempt_vsx(varray, &threads_starting, &running);
89 if (rc == 2)
90 fprintf(stderr, "Caught zeros in VSX compares\n");
91 return (void *)rc;
92}
93
94int test_preempt_vsx(void)
95{
96 int i, rc, threads;
97 pthread_t *tids;
98
99 threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
100 tids = malloc(threads * sizeof(pthread_t));
101 FAIL_IF(!tids);
102
103 running = true;
104 threads_starting = threads;
105 for (i = 0; i < threads; i++) {
106 rc = pthread_create(&tids[i], NULL, preempt_vsx_c, NULL);
107 FAIL_IF(rc);
108 }
109
110 setbuf(stdout, NULL);
111 /* Not really nessesary but nice to wait for every thread to start */
112 printf("\tWaiting for %d workers to start...", threads_starting);
113 while(threads_starting)
114 asm volatile("": : :"memory");
115 printf("done\n");
116
117 printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME);
118 sleep(PREEMPT_TIME);
119 printf("done\n");
120
121 printf("\tStopping workers...");
122 /*
123 * Working are checking this value every loop. In preempt_vsx 'cmpwi r5,0; bne 2b'.
124 * r5 will have loaded the value of running.
125 */
126 running = 0;
127 for (i = 0; i < threads; i++) {
128 void *rc_p;
129 pthread_join(tids[i], &rc_p);
130
131 /*
132 * Harness will say the fail was here, look at why preempt_vsx
133 * returned
134 */
135 if ((long) rc_p)
136 printf("oops\n");
137 FAIL_IF((long) rc_p);
138 }
139 printf("done\n");
140
141 return 0;
142}
143
/* Entry point: run the test under the selftest harness (handles
 * timeouts and result reporting). */
int main(int argc, char *argv[])
{
	return test_harness(test_preempt_vsx, "vsx_preempt");
}
diff --git a/tools/testing/selftests/powerpc/vsx_asm.h b/tools/testing/selftests/powerpc/vsx_asm.h
new file mode 100644
index 000000000000..d828bfb6ef2d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/vsx_asm.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2015, Cyril Bur, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include "basic_asm.h"
11
12/*
13 * Careful this will 'clobber' vsx (by design), VSX are always
14 * volatile though so unlike vmx this isn't so much of an issue
15 * Still should avoid calling from C
16 */
/*
 * load_vsx: load vs20..vs31 with 12 consecutive 16-byte values starting
 * at the address in r3.  Clobbers r5.
 *
 * Fix: lxvx/stxvx are ISA 3.0 instructions and SIGILL on POWER7/POWER8.
 * Use lxvd2x/stxvd2x (ISA 2.06) instead.  On little endian lxvd2x loads
 * the two doublewords swapped, but because store_vsx uses the matching
 * stxvd2x the swap cancels out and the in-memory images compared by
 * check_vsx()/vsx_memcmp() remain consistent.
 */
FUNC_START(load_vsx)
	li	r5,0
	lxvd2x	vs20,r5,r3
	addi	r5,r5,16
	lxvd2x	vs21,r5,r3
	addi	r5,r5,16
	lxvd2x	vs22,r5,r3
	addi	r5,r5,16
	lxvd2x	vs23,r5,r3
	addi	r5,r5,16
	lxvd2x	vs24,r5,r3
	addi	r5,r5,16
	lxvd2x	vs25,r5,r3
	addi	r5,r5,16
	lxvd2x	vs26,r5,r3
	addi	r5,r5,16
	lxvd2x	vs27,r5,r3
	addi	r5,r5,16
	lxvd2x	vs28,r5,r3
	addi	r5,r5,16
	lxvd2x	vs29,r5,r3
	addi	r5,r5,16
	lxvd2x	vs30,r5,r3
	addi	r5,r5,16
	lxvd2x	vs31,r5,r3
	blr
FUNC_END(load_vsx)

/*
 * store_vsx: store vs20..vs31 to 12 consecutive 16-byte slots starting
 * at the address in r3.  Clobbers r5.  Must mirror load_vsx (see the
 * endianness note above).
 */
FUNC_START(store_vsx)
	li	r5,0
	stxvd2x	vs20,r5,r3
	addi	r5,r5,16
	stxvd2x	vs21,r5,r3
	addi	r5,r5,16
	stxvd2x	vs22,r5,r3
	addi	r5,r5,16
	stxvd2x	vs23,r5,r3
	addi	r5,r5,16
	stxvd2x	vs24,r5,r3
	addi	r5,r5,16
	stxvd2x	vs25,r5,r3
	addi	r5,r5,16
	stxvd2x	vs26,r5,r3
	addi	r5,r5,16
	stxvd2x	vs27,r5,r3
	addi	r5,r5,16
	stxvd2x	vs28,r5,r3
	addi	r5,r5,16
	stxvd2x	vs29,r5,r3
	addi	r5,r5,16
	stxvd2x	vs30,r5,r3
	addi	r5,r5,16
	stxvd2x	vs31,r5,r3
	blr
FUNC_END(store_vsx)