author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2017-07-13 13:16:00 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2017-08-09 13:51:57 -0400
commit		35129dde88afad07f54b332d4f9eda2d254b80f2 (patch)
tree		9d9d78d7f3534ac9806ae3189b0273e05be77916 /lib/raid6
parent		f39c3f9b10513dc9cb8a760a74a8141383c7046e (diff)
md/raid6: use faster multiplication for ARM NEON delta syndrome
The P/Q left side optimization in the delta syndrome simply involves
repeatedly multiplying a value by polynomial 'x' in GF(2^8). Given that
'x * x * x * x' equals 'x^4' even in the polynomial world, we can
accelerate this substantially by performing up to 4 such operations at
once, using the NEON instructions for polynomial multiplication.

Results on a Cortex-A57 running in 64-bit mode:

  Before:
  -------
  raid6: neonx1   xor()  1680 MB/s
  raid6: neonx2   xor()  2286 MB/s
  raid6: neonx4   xor()  3162 MB/s
  raid6: neonx8   xor()  3389 MB/s

  After:
  ------
  raid6: neonx1   xor()  2281 MB/s
  raid6: neonx2   xor()  3362 MB/s
  raid6: neonx4   xor()  3787 MB/s
  raid6: neonx8   xor()  4239 MB/s

While we're at it, simplify MASK() by using a signed shift rather than
a vector compare involving a temp register.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
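For context, a minimal scalar sketch of the trick (the helper names below are hypothetical, not part of the patch): one pass of the original loop multiplies each byte by x in GF(2^8) under the RAID-6 polynomial 0x11d, and four such passes collapse into one multiplication by x^4, where the low nibble shifts up and the spilled high nibble is reduced through a carry-less multiply by 0x1d (that is, x^8 mod 0x11d); this is the role PMUL(w2$$, x1d) plays in the NEON code, 16 bytes per instruction.

#include <stdint.h>
#include <stdio.h>

/* One step of the original loop: multiply by x in GF(2^8)
 * mod x^8 + x^4 + x^3 + x^2 + 1 (0x11d). */
static uint8_t gf_mul_x(uint8_t v)
{
	return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

/* Four steps at once: multiply by x^4. The low nibble shifts up;
 * the high nibble overflows past bit 7 and is reduced by a
 * carry-less multiply with 0x1d, whose product already fits in
 * 8 bits (degree <= 3 times degree 4), so no further reduction
 * is needed. */
static uint8_t gf_mul_x4(uint8_t v)
{
	uint8_t lo = v << 4;
	uint8_t hi = v >> 4;
	uint8_t prod = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (hi & (1 << i))
			prod ^= 0x1d << i;
	return lo ^ prod;
}

int main(void)
{
	int v, i;

	/* exhaustively check the x^4 shortcut against four x steps */
	for (v = 0; v < 256; v++) {
		uint8_t ref = v;

		for (i = 0; i < 4; i++)
			ref = gf_mul_x(ref);
		if (ref != gf_mul_x4(v)) {
			printf("mismatch at %d\n", v);
			return 1;
		}
	}
	printf("x^4 shortcut matches four x steps\n");
	return 0;
}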
Diffstat (limited to 'lib/raid6')
-rw-r--r--	lib/raid6/neon.uc	33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index 4fa51b761dd0..d5242f544551 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -46,8 +46,12 @@ static inline unative_t SHLBYTE(unative_t v)
  */
 static inline unative_t MASK(unative_t v)
 {
-	const uint8x16_t temp = NBYTES(0);
-	return (unative_t)vcltq_s8((int8x16_t)v, (int8x16_t)temp);
+	return (unative_t)vshrq_n_s8((int8x16_t)v, 7);
+}
+
+static inline unative_t PMUL(unative_t v, unative_t u)
+{
+	return (unative_t)vmulq_p8((poly8x16_t)v, (poly8x16_t)u);
 }
 
 void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
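The MASK() change in the hunk above is a pure simplification: an arithmetic right shift by 7 smears each byte's sign bit across the whole byte, yielding the same 0x00/0xff mask the old compare produced, without the temp register. A scalar sketch of the equivalence (note that >> on a negative value is implementation-defined in plain C, whereas the vshrq_n_s8 intrinsic always shifts arithmetically):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int v;

	for (v = 0; v < 256; v++) {
		/* old: 0xff where the signed byte is negative, else 0x00 */
		uint8_t cmp = ((int8_t)v < 0) ? 0xff : 0x00;
		/* new: the arithmetic shift replicates the sign bit; GCC
		 * and Clang shift arithmetically here, matching what
		 * vshrq_n_s8 guarantees */
		uint8_t shr = (uint8_t)((int8_t)v >> 7);

		assert(cmp == shr);
	}
	return 0;
}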
@@ -110,7 +114,30 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
 		wq$$ = veorq_u8(w1$$, wd$$);
 	}
 	/* P/Q left side optimization */
-	for ( z = start-1 ; z >= 0 ; z-- ) {
+	for ( z = start-1 ; z >= 3 ; z -= 4 ) {
+		w2$$ = vshrq_n_u8(wq$$, 4);
+		w1$$ = vshlq_n_u8(wq$$, 4);
+
+		w2$$ = PMUL(w2$$, x1d);
+		wq$$ = veorq_u8(w1$$, w2$$);
+	}
+
+	switch (z) {
+	case 2:
+		w2$$ = vshrq_n_u8(wq$$, 5);
+		w1$$ = vshlq_n_u8(wq$$, 3);
+
+		w2$$ = PMUL(w2$$, x1d);
+		wq$$ = veorq_u8(w1$$, w2$$);
+		break;
+	case 1:
+		w2$$ = vshrq_n_u8(wq$$, 6);
+		w1$$ = vshlq_n_u8(wq$$, 2);
+
+		w2$$ = PMUL(w2$$, x1d);
+		wq$$ = veorq_u8(w1$$, w2$$);
+		break;
+	case 0:
 		w2$$ = MASK(wq$$);
 		w1$$ = SHLBYTE(wq$$);
 
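Since z starts at start-1 and drops by 4 only while z >= 3, it leaves the loop as 2, 1, 0, or -1: the switch then finishes the remaining x^3, x^2, or single-x step (case 0 keeps the original MASK()/SHLBYTE() path), and z == -1 needs nothing further. Cases 2 and 1 pair the shifts (8-k, k) for a multiply by x^k, the same split as the x^4 loop. A scalar sketch checking that pairing for k = 1..4 (hypothetical helpers, same 0x11d field as above):

#include <stdint.h>
#include <stdio.h>

/* reference: one multiply-by-x step in GF(2^8) mod 0x11d */
static uint8_t gf_mul_x(uint8_t v)
{
	return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

/* Multiply by x^k (k = 1..4) by splitting the byte at bit 8-k:
 * the low part shifts up by k, the spilled high part is reduced
 * with a carry-less multiply by 0x1d. For k <= 4 the product has
 * degree at most 7, so it needs no further reduction. */
static uint8_t gf_mul_xk(uint8_t v, int k)
{
	uint8_t lo = v << k;
	uint8_t hi = v >> (8 - k);
	uint8_t prod = 0;
	int i;

	for (i = 0; i < k; i++)
		if (hi & (1 << i))
			prod ^= 0x1d << i;
	return lo ^ prod;
}

int main(void)
{
	int k, v, i;

	for (k = 1; k <= 4; k++)
		for (v = 0; v < 256; v++) {
			uint8_t ref = v;

			for (i = 0; i < k; i++)
				ref = gf_mul_x(ref);
			if (ref != gf_mul_xk(v, k)) {
				printf("mismatch: k=%d v=%d\n", k, v);
				return 1;
			}
		}
	printf("shift pairing (8-k, k) matches for k = 1..4\n");
	return 0;
}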