-rw-r--r--  lib/raid6/sse2.c | 230
1 file changed, 227 insertions(+), 3 deletions(-)
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 31acd59a0ef7..1d2276b007ee 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -88,9 +88,58 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+
+static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+
+	for ( d = 0 ; d < bytes ; d += 16 ) {
+		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+		asm volatile("pxor %xmm4,%xmm2");
+		/* P/Q data pages */
+		for ( z = z0-1 ; z >= start ; z-- ) {
+			asm volatile("pxor %xmm5,%xmm5");
+			asm volatile("pcmpgtb %xmm4,%xmm5");
+			asm volatile("paddb %xmm4,%xmm4");
+			asm volatile("pand %xmm0,%xmm5");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+			asm volatile("pxor %xmm5,%xmm2");
+			asm volatile("pxor %xmm5,%xmm4");
+		}
+		/* P/Q left side optimization */
+		for ( z = start-1 ; z >= 0 ; z-- ) {
+			asm volatile("pxor %xmm5,%xmm5");
+			asm volatile("pcmpgtb %xmm4,%xmm5");
+			asm volatile("paddb %xmm4,%xmm4");
+			asm volatile("pand %xmm0,%xmm5");
+			asm volatile("pxor %xmm5,%xmm4");
+		}
+		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+		/* Don't use movntdq for r/w memory area < cache line */
+		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
+		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_sse2x1 = {
 	raid6_sse21_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_sse21_xor_syndrome,
 	raid6_have_sse2,
 	"sse2x1",
 	1			/* Has cache hints */
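
The asm is dense, so a scalar model of what the new xor_syndrome() computes for each byte of the stripe may help. This sketch is illustrative only: gf_mul_x() and xor_syndrome_model() are names made up for this note, not symbols from the patch, but the structure mirrors the vector code above.

#include <stddef.h>

typedef unsigned char u8;

/* Multiply one GF(2^8) element by x: shift left, fold 0x1d back in when
 * the x^8 term overflows. */
static u8 gf_mul_x(u8 v)
{
	return (u8)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

static void xor_syndrome_model(int disks, int start, int stop,
			       size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p = dptr[disks-2];	/* XOR parity */
	u8 *q = dptr[disks-1];	/* RS syndrome */
	size_t d;
	int z;

	for (d = 0; d < bytes; d++) {
		/* Start Horner's rule at the highest changed block. */
		u8 wp = (u8)(p[d] ^ dptr[stop][d]);
		u8 wq = dptr[stop][d];

		/* Blocks stop-1 .. start carry changed data: fold each into
		 * P, and into Q after multiplying the accumulator by x. */
		for (z = stop-1; z >= start; z--) {
			wp ^= dptr[z][d];
			wq = (u8)(gf_mul_x(wq) ^ dptr[z][d]);
		}
		/* Blocks start-1 .. 0 are untouched: no loads, just the
		 * remaining multiplies by x ("left side optimization"). */
		for (z = start-1; z >= 0; z--)
			wq = gf_mul_x(wq);

		p[d] = wp;	/* old P xor all changed blocks */
		q[d] ^= wq;	/* xor the partial syndrome into old Q */
	}
}

The "right side" optimization is starting Horner's rule at z0 = stop rather than at the highest data disk; the "left side" optimization is that blocks below start contribute no new data, so only multiplies by x remain and that loop touches no memory.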
@@ -151,9 +200,76 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+
+	for ( d = 0 ; d < bytes ; d += 32 ) {
+		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+		asm volatile("pxor %xmm4,%xmm2");
+		asm volatile("pxor %xmm6,%xmm3");
+		/* P/Q data pages */
+		for ( z = z0-1 ; z >= start ; z-- ) {
+			asm volatile("pxor %xmm5,%xmm5");
+			asm volatile("pxor %xmm7,%xmm7");
+			asm volatile("pcmpgtb %xmm4,%xmm5");
+			asm volatile("pcmpgtb %xmm6,%xmm7");
+			asm volatile("paddb %xmm4,%xmm4");
+			asm volatile("paddb %xmm6,%xmm6");
+			asm volatile("pand %xmm0,%xmm5");
+			asm volatile("pand %xmm0,%xmm7");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("pxor %xmm7,%xmm6");
+			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+			asm volatile("pxor %xmm5,%xmm2");
+			asm volatile("pxor %xmm7,%xmm3");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("pxor %xmm7,%xmm6");
+		}
+		/* P/Q left side optimization */
+		for ( z = start-1 ; z >= 0 ; z-- ) {
+			asm volatile("pxor %xmm5,%xmm5");
+			asm volatile("pxor %xmm7,%xmm7");
+			asm volatile("pcmpgtb %xmm4,%xmm5");
+			asm volatile("pcmpgtb %xmm6,%xmm7");
+			asm volatile("paddb %xmm4,%xmm4");
+			asm volatile("paddb %xmm6,%xmm6");
+			asm volatile("pand %xmm0,%xmm5");
+			asm volatile("pand %xmm0,%xmm7");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("pxor %xmm7,%xmm6");
+		}
+		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+		/* Don't use movntdq for r/w memory area < cache line */
+		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
+		asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
+		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
+		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_sse2x2 = {
 	raid6_sse22_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_sse22_xor_syndrome,
 	raid6_have_sse2,
 	"sse2x2",
 	1			/* Has cache hints */
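
The sse2x2 variant is the same algorithm unrolled twice: two independent 16-byte lanes (xmm4/xmm6 for the Q accumulators, xmm2/xmm3 for P) are interleaved so the pcmpgtb/paddb/pand/pxor chains overlap and hide latency. For readability, that four-instruction GF(2^8) multiply-by-{02} idiom looks like this with SSE2 intrinsics (an illustration only; the kernel code uses inline asm, and gf2p8_mul_x_16() is a name made up for this note):

#include <emmintrin.h>

/* 'x1d' holds 0x1d in every byte (raid6_sse_constants.x1d in the kernel). */
static __m128i gf2p8_mul_x_16(__m128i v, __m128i x1d)
{
	/* 0xff in every byte whose top bit is set: signed compare 0 > v */
	__m128i mask = _mm_cmpgt_epi8(_mm_setzero_si128(), v);

	v = _mm_add_epi8(v, v);		/* byte-wise shift left by one */
	/* fold the reduction constant back in where x^8 overflowed */
	return _mm_xor_si128(v, _mm_and_si128(mask, x1d));
}

RAID-6 works in GF(2^8) modulo x^8 + x^4 + x^3 + x^2 + 1 (0x11d); paddb doubles every byte, dropping the x^8 term, and the masked XOR with 0x1d finishes the reduction exactly in the bytes whose top bit was set.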
@@ -250,9 +366,117 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
+
+	for ( d = 0 ; d < bytes ; d += 64 ) {
+		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+		asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
+		asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
+		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
+		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
+		asm volatile("pxor %xmm4,%xmm2");
+		asm volatile("pxor %xmm6,%xmm3");
+		asm volatile("pxor %xmm12,%xmm10");
+		asm volatile("pxor %xmm14,%xmm11");
+		/* P/Q data pages */
+		for ( z = z0-1 ; z >= start ; z-- ) {
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+			asm volatile("pxor %xmm5,%xmm5");
+			asm volatile("pxor %xmm7,%xmm7");
+			asm volatile("pxor %xmm13,%xmm13");
+			asm volatile("pxor %xmm15,%xmm15");
+			asm volatile("pcmpgtb %xmm4,%xmm5");
+			asm volatile("pcmpgtb %xmm6,%xmm7");
+			asm volatile("pcmpgtb %xmm12,%xmm13");
+			asm volatile("pcmpgtb %xmm14,%xmm15");
+			asm volatile("paddb %xmm4,%xmm4");
+			asm volatile("paddb %xmm6,%xmm6");
+			asm volatile("paddb %xmm12,%xmm12");
+			asm volatile("paddb %xmm14,%xmm14");
+			asm volatile("pand %xmm0,%xmm5");
+			asm volatile("pand %xmm0,%xmm7");
+			asm volatile("pand %xmm0,%xmm13");
+			asm volatile("pand %xmm0,%xmm15");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("pxor %xmm7,%xmm6");
+			asm volatile("pxor %xmm13,%xmm12");
+			asm volatile("pxor %xmm15,%xmm14");
+			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
+			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+			asm volatile("pxor %xmm5,%xmm2");
+			asm volatile("pxor %xmm7,%xmm3");
+			asm volatile("pxor %xmm13,%xmm10");
+			asm volatile("pxor %xmm15,%xmm11");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("pxor %xmm7,%xmm6");
+			asm volatile("pxor %xmm13,%xmm12");
+			asm volatile("pxor %xmm15,%xmm14");
+		}
+		asm volatile("prefetchnta %0" :: "m" (q[d]));
+		asm volatile("prefetchnta %0" :: "m" (q[d+32]));
+		/* P/Q left side optimization */
+		for ( z = start-1 ; z >= 0 ; z-- ) {
+			asm volatile("pxor %xmm5,%xmm5");
+			asm volatile("pxor %xmm7,%xmm7");
+			asm volatile("pxor %xmm13,%xmm13");
+			asm volatile("pxor %xmm15,%xmm15");
+			asm volatile("pcmpgtb %xmm4,%xmm5");
+			asm volatile("pcmpgtb %xmm6,%xmm7");
+			asm volatile("pcmpgtb %xmm12,%xmm13");
+			asm volatile("pcmpgtb %xmm14,%xmm15");
+			asm volatile("paddb %xmm4,%xmm4");
+			asm volatile("paddb %xmm6,%xmm6");
+			asm volatile("paddb %xmm12,%xmm12");
+			asm volatile("paddb %xmm14,%xmm14");
+			asm volatile("pand %xmm0,%xmm5");
+			asm volatile("pand %xmm0,%xmm7");
+			asm volatile("pand %xmm0,%xmm13");
+			asm volatile("pand %xmm0,%xmm15");
+			asm volatile("pxor %xmm5,%xmm4");
+			asm volatile("pxor %xmm7,%xmm6");
+			asm volatile("pxor %xmm13,%xmm12");
+			asm volatile("pxor %xmm15,%xmm14");
+		}
+		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
+		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
+		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+		asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
+		asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
+		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
+		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
+		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
+		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
+	}
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
+
 const struct raid6_calls raid6_sse2x4 = {
 	raid6_sse24_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_sse24_xor_syndrome,
 	raid6_have_sse2,
 	"sse2x4",
 	1			/* Has cache hints */
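
Why the two inner loops are sufficient, in one formula: with generator g = {02}, the Q syndrome is Q = sum over z of g^z * D_z, so the contribution of blocks start..stop factors by Horner's rule:

\sum_{z=\mathrm{start}}^{\mathrm{stop}} g^{z} D_z
	= g^{\mathrm{start}} \Bigl( D_{\mathrm{start}} \oplus g \bigl( D_{\mathrm{start}+1} \oplus \cdots \oplus g\, D_{\mathrm{stop}} \bigr) \Bigr),
\qquad g = \{02\} \in \mathrm{GF}(2^8).

The first loop evaluates the inner parenthesised chain with one load per changed block; the second applies the trailing g^start factor and needs no loads at all. Note also the store strategy: only this 4-way variant streams results out with movntdq behind prefetchnta hints, since each iteration writes a full 64 bytes of P and of Q, while the x1/x2 variants keep plain movdqa because, as their comment says, non-temporal stores lose on read/write areas smaller than a cache line. All three variants issue sfence before kernel_fpu_end() so the stores are globally visible.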
