Diffstat (limited to 'lib')
 lib/raid6/Makefile      |   9
 lib/raid6/algos.c       |  12
 lib/raid6/altivec.uc    |   3
 lib/raid6/avx2.c        | 251
 lib/raid6/mmx.c         |   2
 lib/raid6/recov_avx2.c  | 323
 lib/raid6/recov_ssse3.c |   4
 lib/raid6/sse1.c        |   2
 lib/raid6/sse2.c        |   8
 lib/raid6/test/Makefile |  29
 lib/raid6/x86.h         |  14
 11 files changed, 631 insertions(+), 26 deletions(-)
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index de06dfe165b8..9f7c184725d7 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -1,8 +1,11 @@
 obj-$(CONFIG_RAID6_PQ)	+= raid6_pq.o
 
-raid6_pq-y	+= algos.o recov.o recov_ssse3.o tables.o int1.o int2.o int4.o \
-	   int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \
-	   altivec8.o mmx.o sse1.o sse2.o
+raid6_pq-y	+= algos.o recov.o tables.o int1.o int2.o int4.o \
+	   int8.o int16.o int32.o
+
+raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
+raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+
 hostprogs-y	+= mktables
 
 quiet_cmd_unroll = UNROLL  $@
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 589f5f50ad2e..6d7316fe9f30 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -45,11 +45,20 @@ const struct raid6_calls * const raid6_algos[] = {
 	&raid6_sse1x2,
 	&raid6_sse2x1,
 	&raid6_sse2x2,
+#ifdef CONFIG_AS_AVX2
+	&raid6_avx2x1,
+	&raid6_avx2x2,
+#endif
 #endif
 #if defined(__x86_64__) && !defined(__arch_um__)
 	&raid6_sse2x1,
 	&raid6_sse2x2,
 	&raid6_sse2x4,
+#ifdef CONFIG_AS_AVX2
+	&raid6_avx2x1,
+	&raid6_avx2x2,
+	&raid6_avx2x4,
+#endif
 #endif
 #ifdef CONFIG_ALTIVEC
 	&raid6_altivec1,
@@ -72,6 +81,9 @@ EXPORT_SYMBOL_GPL(raid6_datap_recov);
 
 const struct raid6_recov_calls *const raid6_recov_algos[] = {
 #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
+#ifdef CONFIG_AS_AVX2
+	&raid6_recov_avx2,
+#endif
 	&raid6_recov_ssse3,
 #endif
 	&raid6_recov_intx1,
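
For context (not part of the patch): the raid6_recov_algos[] table patched above is scanned at init time, and roughly the highest-priority entry whose ->valid() hook reports support on the running CPU is installed as the recovery implementation. A simplified sketch of that selection, assuming the declarations from <linux/raid/pq.h>; the helper name pick_recov() is invented here:

#include <linux/raid/pq.h>

static void pick_recov(void)
{
	const struct raid6_recov_calls *const *algo;
	const struct raid6_recov_calls *best = NULL;

	/* Prefer the highest-priority implementation the CPU supports;
	 * raid6_recov_avx2 uses .priority = 2, raid6_recov_ssse3 uses 1. */
	for (algo = raid6_recov_algos; *algo; algo++)
		if (!best || (*algo)->priority > best->priority)
			if (!(*algo)->valid || (*algo)->valid())
				best = *algo;

	if (best) {
		raid6_2data_recov = best->data2;
		raid6_datap_recov = best->datap;
	}
}
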
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index b71012b756f4..7cc12b532e95 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -24,13 +24,10 @@
 
 #include <linux/raid/pq.h>
 
-#ifdef CONFIG_ALTIVEC
-
 #include <altivec.h>
 #ifdef __KERNEL__
 # include <asm/cputable.h>
 # include <asm/switch_to.h>
-#endif
 
 /*
  * This is the C data type to use.  We use a vector of
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
new file mode 100644
index 000000000000..bc3b1dd436eb
--- /dev/null
+++ b/lib/raid6/avx2.c
@@ -0,0 +1,251 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+ *
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * AVX2 implementation of RAID-6 syndrome functions
+ *
+ */
+
+#ifdef CONFIG_AS_AVX2
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static const struct raid6_avx2_constants {
+	u64 x1d[4];
+} raid6_avx2_constants __aligned(32) = {
+	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+	  0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
+};
+
+static int raid6_have_avx2(void)
+{
+	return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
+}
+
+/*
+ * Plain AVX2 implementation
+ */
+static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = disks - 3;		/* Highest data disk */
+	p = dptr[z0+1];		/* XOR parity */
+	q = dptr[z0+2];		/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+	asm volatile("vpxor %ymm3,%ymm3,%ymm3");	/* Zero temp */
+
+	for (d = 0; d < bytes; d += 32) {
+		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
+		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+		asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */
+		asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
+		for (z = z0-2; z >= 0; z--) {
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+			asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm6,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm6,%ymm4,%ymm4");
+			asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
+		}
+		asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
+		asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+		asm volatile("vpand %ymm0,%ymm5,%ymm5");
+		asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+		asm volatile("vpxor %ymm6,%ymm2,%ymm2");
+		asm volatile("vpxor %ymm6,%ymm4,%ymm4");
+
+		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vpxor %ymm2,%ymm2,%ymm2");
+		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vpxor %ymm4,%ymm4,%ymm4");
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x1 = {
+	raid6_avx21_gen_syndrome,
+	raid6_have_avx2,
+	"avx2x1",
+	1			/* Has cache hints */
+};
+
+/*
+ * Unrolled-by-2 AVX2 implementation
+ */
+static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = disks - 3;		/* Highest data disk */
+	p = dptr[z0+1];		/* XOR parity */
+	q = dptr[z0+2];		/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+	asm volatile("vpxor %ymm1,%ymm1,%ymm1");	/* Zero temp */
+
+	/* We uniformly assume a single prefetch covers at least 32 bytes */
+	for (d = 0; d < bytes; d += 64) {
+		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
+		asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
+		asm volatile("vmovdqa %ymm2,%ymm4");	/* Q[0] */
+		asm volatile("vmovdqa %ymm3,%ymm6");	/* Q[1] */
+		for (z = z0-1; z >= 0; z--) {
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+			asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
+			asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+		}
+		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x2 = {
+	raid6_avx22_gen_syndrome,
+	raid6_have_avx2,
+	"avx2x2",
+	1			/* Has cache hints */
+};
+
+#ifdef CONFIG_X86_64
+
+/*
+ * Unrolled-by-4 AVX2 implementation
+ */
+static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = disks - 3;		/* Highest data disk */
+	p = dptr[z0+1];		/* XOR parity */
+	q = dptr[z0+2];		/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+	asm volatile("vpxor %ymm1,%ymm1,%ymm1");	/* Zero temp */
+	asm volatile("vpxor %ymm2,%ymm2,%ymm2");	/* P[0] */
+	asm volatile("vpxor %ymm3,%ymm3,%ymm3");	/* P[1] */
+	asm volatile("vpxor %ymm4,%ymm4,%ymm4");	/* Q[0] */
+	asm volatile("vpxor %ymm6,%ymm6,%ymm6");	/* Q[1] */
+	asm volatile("vpxor %ymm10,%ymm10,%ymm10");	/* P[2] */
+	asm volatile("vpxor %ymm11,%ymm11,%ymm11");	/* P[3] */
+	asm volatile("vpxor %ymm12,%ymm12,%ymm12");	/* Q[2] */
+	asm volatile("vpxor %ymm14,%ymm14,%ymm14");	/* Q[3] */
+
+	for (d = 0; d < bytes; d += 128) {
+		for (z = z0; z >= 0; z--) {
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
+			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
+			asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
+			asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
+			asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpand %ymm0,%ymm13,%ymm13");
+			asm volatile("vpand %ymm0,%ymm15,%ymm15");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+			asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
+			asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+			asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
+			asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+		}
+		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vpxor %ymm2,%ymm2,%ymm2");
+		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+		asm volatile("vpxor %ymm3,%ymm3,%ymm3");
+		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+		asm volatile("vpxor %ymm10,%ymm10,%ymm10");
+		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+		asm volatile("vpxor %ymm11,%ymm11,%ymm11");
+		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vpxor %ymm4,%ymm4,%ymm4");
+		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+		asm volatile("vpxor %ymm6,%ymm6,%ymm6");
+		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+		asm volatile("vpxor %ymm12,%ymm12,%ymm12");
+		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+		asm volatile("vpxor %ymm14,%ymm14,%ymm14");
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x4 = {
+	raid6_avx24_gen_syndrome,
+	raid6_have_avx2,
+	"avx2x4",
+	1			/* Has cache hints */
+};
+#endif
+
+#endif /* CONFIG_AS_AVX2 */
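
For reference (not part of the patch): the inner vpcmpgtb/vpaddb/vpand/vpxor sequence above is a 32-byte-wide multiply-by-2 in GF(2^8) with the RAID-6 reduction polynomial 0x11d, applied to the running Q value before the next data block is XORed in. A scalar sketch of that single step, with the helper name gf2_mul2 invented for illustration:

#include <stdint.h>

/* Scalar equivalent of one vpcmpgtb/vpaddb/vpand/vpxor round. */
static uint8_t gf2_mul2(uint8_t v)
{
	/* vpcmpgtb against an all-zero register yields 0xff for bytes with
	 * the top bit set (signed compare), i.e. values >= 0x80. */
	uint8_t mask = (v & 0x80) ? 0xff : 0x00;

	/* vpaddb v,v doubles each byte (shift left by one); the masked
	 * XOR with 0x1d folds the reduction polynomial back in on overflow. */
	return (uint8_t)(v << 1) ^ (mask & 0x1d);
}

The non-temporal vmovntdq stores and the trailing sfence keep the freshly computed P and Q blocks from displacing the data still being streamed through the cache.
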
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
index 279347f23094..590c71c9e200 100644
--- a/lib/raid6/mmx.c
+++ b/lib/raid6/mmx.c
@@ -16,7 +16,7 @@
  * MMX implementation of RAID-6 syndrome functions
  */
 
-#if defined(__i386__) && !defined(__arch_um__)
+#ifdef CONFIG_X86_32
 
 #include <linux/raid/pq.h>
 #include "x86.h"
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
new file mode 100644
index 000000000000..e1eea433a493
--- /dev/null
+++ b/lib/raid6/recov_avx2.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifdef CONFIG_AS_AVX2
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static int raid6_has_avx2(void)
+{
+	return boot_cpu_has(X86_FEATURE_AVX2) &&
+		boot_cpu_has(X86_FEATURE_AVX);
+}
+
+static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
+		int failb, void **ptrs)
+{
+	u8 *p, *q, *dp, *dq;
+	const u8 *pbmul;	/* P multiplier table for B data */
+	const u8 *qmul;		/* Q multiplier table (for both) */
+	const u8 x0f = 0x0f;
+
+	p = (u8 *)ptrs[disks-2];
+	q = (u8 *)ptrs[disks-1];
+
+	/* Compute syndrome with zero for the missing data pages
+	   Use the dead data pages as temporary storage for
+	   delta p and delta q */
+	dp = (u8 *)ptrs[faila];
+	ptrs[faila] = (void *)raid6_empty_zero_page;
+	ptrs[disks-2] = dp;
+	dq = (u8 *)ptrs[failb];
+	ptrs[failb] = (void *)raid6_empty_zero_page;
+	ptrs[disks-1] = dq;
+
+	raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+	/* Restore pointer table */
+	ptrs[faila] = dp;
+	ptrs[failb] = dq;
+	ptrs[disks-2] = p;
+	ptrs[disks-1] = q;
+
+	/* Now, pick the proper data tables */
+	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+		raid6_gfexp[failb]]];
+
+	kernel_fpu_begin();
+
+	/* ymm7 = x0f[16] */
+	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
+
+	while (bytes) {
+#ifdef CONFIG_X86_64
+		asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
+		asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
+		asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
+		asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
+		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
+		asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
+		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
+		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));
+
+		/*
+		 * 1 = dq[0]  ^ q[0]
+		 * 9 = dq[32] ^ q[32]
+		 * 0 = dp[0]  ^ p[0]
+		 * 8 = dp[32] ^ p[32]
+		 */
+
+		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
+		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
+
+		asm volatile("vpsraw $4, %ymm1, %ymm3");
+		asm volatile("vpsraw $4, %ymm9, %ymm12");
+		asm volatile("vpand %ymm7, %ymm1, %ymm1");
+		asm volatile("vpand %ymm7, %ymm9, %ymm9");
+		asm volatile("vpand %ymm7, %ymm3, %ymm3");
+		asm volatile("vpand %ymm7, %ymm12, %ymm12");
+		asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
+		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
+		asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
+		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
+		asm volatile("vpxor %ymm14, %ymm15, %ymm15");
+		asm volatile("vpxor %ymm4, %ymm5, %ymm5");
+
+		/*
+		 * 5 = qx[0]
+		 * 15 = qx[32]
+		 */
+
+		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
+		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
+		asm volatile("vpsraw $4, %ymm0, %ymm2");
+		asm volatile("vpsraw $4, %ymm8, %ymm6");
+		asm volatile("vpand %ymm7, %ymm0, %ymm3");
+		asm volatile("vpand %ymm7, %ymm8, %ymm14");
+		asm volatile("vpand %ymm7, %ymm2, %ymm2");
+		asm volatile("vpand %ymm7, %ymm6, %ymm6");
+		asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
+		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
+		asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
+		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
+		asm volatile("vpxor %ymm4, %ymm1, %ymm1");
+		asm volatile("vpxor %ymm12, %ymm13, %ymm13");
+
+		/*
+		 * 1 = pbmul[px[0]]
+		 * 13 = pbmul[px[32]]
+		 */
+		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
+		asm volatile("vpxor %ymm15, %ymm13, %ymm13");
+
+		/*
+		 * 1 = db = DQ
+		 * 13 = db[32] = DQ[32]
+		 */
+		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+		asm volatile("vmovdqa %%ymm13,%0" : "=m" (dq[32]));
+		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
+		asm volatile("vpxor %ymm13, %ymm8, %ymm8");
+
+		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
+		asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));
+
+		bytes -= 64;
+		p += 64;
+		q += 64;
+		dp += 64;
+		dq += 64;
+#else
+		asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
+		asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
+		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
+		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));
+
+		/* 1 = dq ^ q;  0 = dp ^ p */
+
+		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
+		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
+
+		/*
+		 * 1 = dq ^ q
+		 * 3 = dq ^ p >> 4
+		 */
+		asm volatile("vpsraw $4, %ymm1, %ymm3");
+		asm volatile("vpand %ymm7, %ymm1, %ymm1");
+		asm volatile("vpand %ymm7, %ymm3, %ymm3");
+		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
+		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
+		asm volatile("vpxor %ymm4, %ymm5, %ymm5");
+
+		/* 5 = qx */
+
+		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
+		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
+
+		asm volatile("vpsraw $4, %ymm0, %ymm2");
+		asm volatile("vpand %ymm7, %ymm0, %ymm3");
+		asm volatile("vpand %ymm7, %ymm2, %ymm2");
+		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
+		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
+		asm volatile("vpxor %ymm4, %ymm1, %ymm1");
+
+		/* 1 = pbmul[px] */
+		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
+		/* 1 = db = DQ */
+		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+
+		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
+		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
+
+		bytes -= 32;
+		p += 32;
+		q += 32;
+		dp += 32;
+		dq += 32;
+#endif
+	}
+
+	kernel_fpu_end();
+}
+
+static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
+		void **ptrs)
+{
+	u8 *p, *q, *dq;
+	const u8 *qmul;		/* Q multiplier table */
+	const u8 x0f = 0x0f;
+
+	p = (u8 *)ptrs[disks-2];
+	q = (u8 *)ptrs[disks-1];
+
+	/* Compute syndrome with zero for the missing data page
+	   Use the dead data page as temporary storage for delta q */
+	dq = (u8 *)ptrs[faila];
+	ptrs[faila] = (void *)raid6_empty_zero_page;
+	ptrs[disks-1] = dq;
+
+	raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+	/* Restore pointer table */
+	ptrs[faila] = dq;
+	ptrs[disks-1] = q;
+
+	/* Now, pick the proper data tables */
+	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+	kernel_fpu_begin();
+
+	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
+
+	while (bytes) {
+#ifdef CONFIG_X86_64
+		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
+		asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
+		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
+		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));
+
+		/*
+		 * 3 = q[0] ^ dq[0]
+		 * 8 = q[32] ^ dq[32]
+		 */
+		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
+		asm volatile("vmovapd %ymm0, %ymm13");
+		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
+		asm volatile("vmovapd %ymm1, %ymm14");
+
+		asm volatile("vpsraw $4, %ymm3, %ymm6");
+		asm volatile("vpsraw $4, %ymm8, %ymm12");
+		asm volatile("vpand %ymm7, %ymm3, %ymm3");
+		asm volatile("vpand %ymm7, %ymm8, %ymm8");
+		asm volatile("vpand %ymm7, %ymm6, %ymm6");
+		asm volatile("vpand %ymm7, %ymm12, %ymm12");
+		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
+		asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
+		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
+		asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
+		asm volatile("vpxor %ymm0, %ymm1, %ymm1");
+		asm volatile("vpxor %ymm13, %ymm14, %ymm14");
+
+		/*
+		 * 1 = qmul[q[0] ^ dq[0]]
+		 * 14 = qmul[q[32] ^ dq[32]]
+		 */
+		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
+		asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
+		asm volatile("vpxor %ymm1, %ymm2, %ymm2");
+		asm volatile("vpxor %ymm14, %ymm12, %ymm12");
+
+		/*
+		 * 2 = p[0] ^ qmul[q[0] ^ dq[0]]
+		 * 12 = p[32] ^ qmul[q[32] ^ dq[32]]
+		 */
+
+		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+		asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
+		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
+		asm volatile("vmovdqa %%ymm12,%0" : "=m" (p[32]));
+
+		bytes -= 64;
+		p += 64;
+		q += 64;
+		dq += 64;
+#else
+		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
+		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
+
+		/* 3 = q ^ dq */
+
+		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
+		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
+
+		asm volatile("vpsraw $4, %ymm3, %ymm6");
+		asm volatile("vpand %ymm7, %ymm3, %ymm3");
+		asm volatile("vpand %ymm7, %ymm6, %ymm6");
+		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
+		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
+		asm volatile("vpxor %ymm0, %ymm1, %ymm1");
+
+		/* 1 = qmul[q ^ dq] */
+
+		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
+		asm volatile("vpxor %ymm1, %ymm2, %ymm2");
+
+		/* 2 = p ^ qmul[q ^ dq] */
+
+		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
+
+		bytes -= 32;
+		p += 32;
+		q += 32;
+		dq += 32;
+#endif
+	}
+
+	kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_avx2 = {
+	.data2 = raid6_2data_recov_avx2,
+	.datap = raid6_datap_recov_avx2,
+	.valid = raid6_has_avx2,
+#ifdef CONFIG_X86_64
+	.name = "avx2x2",
+#else
+	.name = "avx2x1",
+#endif
+	.priority = 2,
+};
+
+#else
+#warning "your version of binutils lacks AVX2 support"
+#endif
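
For reference (not part of the patch): both routines above follow the same recovery math as the generic code in lib/raid6/recov.c, only 32 or 64 bytes per iteration, with the GF(2^8) table lookups done as low/high-nibble vpshufb lookups (qmul[0] and qmul[16] are the two 16-entry halves of a table). Roughly, the scalar form of the two-data-disk case; recov_2data_scalar is an invented name and the pbmul/qmul tables are assumed to come from the generated GF tables:

#include <stddef.h>
#include <stdint.h>

static void recov_2data_scalar(size_t bytes, uint8_t *p, uint8_t *q,
			       uint8_t *dp, uint8_t *dq,
			       const uint8_t *pbmul, const uint8_t *qmul)
{
	uint8_t px, qx, db;

	while (bytes--) {
		px    = *p ^ *dp;		/* P delta */
		qx    = qmul[*q ^ *dq];		/* qmul applied to the Q delta */
		*dq++ = db = pbmul[px] ^ qx;	/* reconstructed data block B */
		*dp++ = db ^ px;		/* reconstructed data block A */
		p++;
		q++;
	}
}
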
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index ecb710c0b4d9..a9168328f03b 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -7,8 +7,6 @@
  * of the License.
  */
 
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
 #include <linux/raid/pq.h>
 #include "x86.h"
 
@@ -332,5 +330,3 @@ const struct raid6_recov_calls raid6_recov_ssse3 = {
 #endif
 	.priority = 1,
 };
-
-#endif
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c
index 10dd91948c07..f76297139445 100644
--- a/lib/raid6/sse1.c
+++ b/lib/raid6/sse1.c
@@ -21,7 +21,7 @@
  * worthwhile as a separate implementation.
  */
 
-#if defined(__i386__) && !defined(__arch_um__)
+#ifdef CONFIG_X86_32
 
 #include <linux/raid/pq.h>
 #include "x86.h"
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index bc2d57daa589..85b82c85f28e 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -17,8 +17,6 @@
  *
  */
 
-#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
-
 #include <linux/raid/pq.h>
 #include "x86.h"
 
@@ -159,9 +157,7 @@ const struct raid6_calls raid6_sse2x2 = {
 	1			/* Has cache hints */
 };
 
-#endif
-
-#if defined(__x86_64__) && !defined(__arch_um__)
+#ifdef CONFIG_X86_64
 
 /*
  * Unrolled-by-4 SSE2 implementation
@@ -259,4 +255,4 @@ const struct raid6_calls raid6_sse2x4 = {
 	1			/* Has cache hints */
 };
 
-#endif
+#endif /* CONFIG_X86_64 */
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index c76151d94764..087332dbf8aa 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -10,6 +10,31 @@ LD = ld
 AWK	 = awk -f
 AR	 = ar
 RANLIB	 = ranlib
+OBJS	 = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
+
+ARCH := $(shell uname -m 2>/dev/null | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+        CFLAGS += -DCONFIG_X86_32
+        IS_X86 = yes
+endif
+ifeq ($(ARCH),x86_64)
+        CFLAGS += -DCONFIG_X86_64
+        IS_X86 = yes
+endif
+
+ifeq ($(IS_X86),yes)
+        OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o
+        CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
+                    gcc -c -x assembler - >&/dev/null && \
+                    rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+else
+        HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
+                         gcc -c -x c - >&/dev/null && \
+                         rm ./-.o && echo yes)
+        ifeq ($(HAS_ALTIVEC),yes)
+                OBJS += altivec1.o altivec2.o altivec4.o altivec8.o
+        endif
+endif
 
 .c.o:
 	$(CC) $(CFLAGS) -c -o $@ $<
@@ -22,9 +47,7 @@ RANLIB = ranlib
 
 all:	raid6.a raid6test
 
-raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \
-	altivec1.o altivec2.o altivec4.o altivec8.o recov.o recov_ssse3.o algos.o \
-	tables.o
+raid6.a: $(OBJS)
 	 rm -f $@
 	 $(AR) cq $@ $^
 	 $(RANLIB) $@
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h
index d55d63232c55..b7595484a815 100644
--- a/lib/raid6/x86.h
+++ b/lib/raid6/x86.h
@@ -45,19 +45,23 @@ static inline void kernel_fpu_end(void)
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
 #define X86_FEATURE_SSSE3	(4*32+ 9) /* Supplemental SSE-3 */
 #define X86_FEATURE_AVX	(4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
 #define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
 
 /* Should work well enough on modern CPUs for testing */
 static inline int boot_cpu_has(int flag)
 {
-	u32 eax = (flag & 0x20) ? 0x80000001 : 1;
-	u32 ecx, edx;
+	u32 eax, ebx, ecx, edx;
+
+	eax = (flag & 0x100) ? 7 :
+		(flag & 0x20) ? 0x80000001 : 1;
+	ecx = 0;
 
 	asm volatile("cpuid"
-		     : "+a" (eax), "=d" (edx), "=c" (ecx)
-		     : : "ebx");
+		     : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx));
 
-	return ((flag & 0x80 ? ecx : edx) >> (flag & 31)) & 1;
+	return ((flag & 0x100 ? ebx :
+		(flag & 0x80) ? ecx : edx) >> (flag & 31)) & 1;
 }
 
 #endif /* ndef __KERNEL__ */
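
For reference (not part of the patch): this userspace boot_cpu_has() packs the CPUID bit number into the low 5 bits of the flag and selects the leaf and output register with bits 5, 7 and 8. A small standalone decode of the new AVX2 flag, illustrative only:

#include <stdio.h>

#define X86_FEATURE_AVX2 (9*32+ 5)	/* same encoding as in x86.h */

int main(void)
{
	int flag = X86_FEATURE_AVX2;			/* == 0x125 */
	unsigned int leaf = (flag & 0x100) ? 7 :
			    (flag & 0x20) ? 0x80000001 : 1;
	const char *reg = (flag & 0x100) ? "ebx" :
			  (flag & 0x80) ? "ecx" : "edx";

	/* Prints: CPUID leaf 0x7, ebx, bit 5 -- the AVX2 feature bit. */
	printf("CPUID leaf 0x%x, %s, bit %d\n", leaf, reg, flag & 31);
	return 0;
}
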