diff options
author | Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | 2013-01-19 06:39:36 -0500 |
---|---|---|
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2013-01-19 18:16:50 -0500 |
commit | 2dcfd44dee3fd3a63e3e3d3f5cbfd2436d1f98a6 (patch) | |
tree | 043570d183b9e600d08d2592c18b7ae927d756f0 /arch/x86/crypto | |
parent | 044438082cf1447e37534b24beff723835464954 (diff) |
crypto: x86/serpent - use ENTRY/ENDPROC for assembler functions and localize jump targets
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r-- | arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 35 | ||||
-rw-r--r-- | arch/x86/crypto/serpent-sse2-i586-asm_32.S | 20 | ||||
-rw-r--r-- | arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | 20 |
3 files changed, 27 insertions, 48 deletions
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index 02b0e9fe997c..43c938612b74 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S | |||
@@ -24,6 +24,7 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/linkage.h> | ||
27 | #include "glue_helper-asm-avx.S" | 28 | #include "glue_helper-asm-avx.S" |
28 | 29 | ||
29 | .file "serpent-avx-x86_64-asm_64.S" | 30 | .file "serpent-avx-x86_64-asm_64.S" |
@@ -566,8 +567,6 @@ | |||
566 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) | 567 | transpose_4x4(x0, x1, x2, x3, t0, t1, t2) |
567 | 568 | ||
568 | .align 8 | 569 | .align 8 |
569 | .type __serpent_enc_blk8_avx,@function; | ||
570 | |||
571 | __serpent_enc_blk8_avx: | 570 | __serpent_enc_blk8_avx: |
572 | /* input: | 571 | /* input: |
573 | * %rdi: ctx, CTX | 572 | * %rdi: ctx, CTX |
@@ -619,10 +618,9 @@ __serpent_enc_blk8_avx: | |||
619 | write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); | 618 | write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); |
620 | 619 | ||
621 | ret; | 620 | ret; |
621 | ENDPROC(__serpent_enc_blk8_avx) | ||
622 | 622 | ||
623 | .align 8 | 623 | .align 8 |
624 | .type __serpent_dec_blk8_avx,@function; | ||
625 | |||
626 | __serpent_dec_blk8_avx: | 624 | __serpent_dec_blk8_avx: |
627 | /* input: | 625 | /* input: |
628 | * %rdi: ctx, CTX | 626 | * %rdi: ctx, CTX |
@@ -674,12 +672,9 @@ __serpent_dec_blk8_avx: | |||
674 | write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); | 672 | write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); |
675 | 673 | ||
676 | ret; | 674 | ret; |
675 | ENDPROC(__serpent_dec_blk8_avx) | ||
677 | 676 | ||
678 | .align 8 | 677 | ENTRY(serpent_ecb_enc_8way_avx) |
679 | .global serpent_ecb_enc_8way_avx | ||
680 | .type serpent_ecb_enc_8way_avx,@function; | ||
681 | |||
682 | serpent_ecb_enc_8way_avx: | ||
683 | /* input: | 678 | /* input: |
684 | * %rdi: ctx, CTX | 679 | * %rdi: ctx, CTX |
685 | * %rsi: dst | 680 | * %rsi: dst |
@@ -693,12 +688,9 @@ serpent_ecb_enc_8way_avx: | |||
693 | store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 688 | store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
694 | 689 | ||
695 | ret; | 690 | ret; |
691 | ENDPROC(serpent_ecb_enc_8way_avx) | ||
696 | 692 | ||
697 | .align 8 | 693 | ENTRY(serpent_ecb_dec_8way_avx) |
698 | .global serpent_ecb_dec_8way_avx | ||
699 | .type serpent_ecb_dec_8way_avx,@function; | ||
700 | |||
701 | serpent_ecb_dec_8way_avx: | ||
702 | /* input: | 694 | /* input: |
703 | * %rdi: ctx, CTX | 695 | * %rdi: ctx, CTX |
704 | * %rsi: dst | 696 | * %rsi: dst |
@@ -712,12 +704,9 @@ serpent_ecb_dec_8way_avx: | |||
712 | store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); | 704 | store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); |
713 | 705 | ||
714 | ret; | 706 | ret; |
707 | ENDPROC(serpent_ecb_dec_8way_avx) | ||
715 | 708 | ||
716 | .align 8 | 709 | ENTRY(serpent_cbc_dec_8way_avx) |
717 | .global serpent_cbc_dec_8way_avx | ||
718 | .type serpent_cbc_dec_8way_avx,@function; | ||
719 | |||
720 | serpent_cbc_dec_8way_avx: | ||
721 | /* input: | 710 | /* input: |
722 | * %rdi: ctx, CTX | 711 | * %rdi: ctx, CTX |
723 | * %rsi: dst | 712 | * %rsi: dst |
@@ -731,12 +720,9 @@ serpent_cbc_dec_8way_avx: | |||
731 | store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); | 720 | store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); |
732 | 721 | ||
733 | ret; | 722 | ret; |
723 | ENDPROC(serpent_cbc_dec_8way_avx) | ||
734 | 724 | ||
735 | .align 8 | 725 | ENTRY(serpent_ctr_8way_avx) |
736 | .global serpent_ctr_8way_avx | ||
737 | .type serpent_ctr_8way_avx,@function; | ||
738 | |||
739 | serpent_ctr_8way_avx: | ||
740 | /* input: | 726 | /* input: |
741 | * %rdi: ctx, CTX | 727 | * %rdi: ctx, CTX |
742 | * %rsi: dst | 728 | * %rsi: dst |
@@ -752,3 +738,4 @@ serpent_ctr_8way_avx: | |||
752 | store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 738 | store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
753 | 739 | ||
754 | ret; | 740 | ret; |
741 | ENDPROC(serpent_ctr_8way_avx) | ||
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S index c00053d42f99..d348f1553a79 100644 --- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S +++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S | |||
@@ -24,6 +24,8 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/linkage.h> | ||
28 | |||
27 | .file "serpent-sse2-i586-asm_32.S" | 29 | .file "serpent-sse2-i586-asm_32.S" |
28 | .text | 30 | .text |
29 | 31 | ||
@@ -510,11 +512,7 @@ | |||
510 | pxor t0, x3; \ | 512 | pxor t0, x3; \ |
511 | movdqu x3, (3*4*4)(out); | 513 | movdqu x3, (3*4*4)(out); |
512 | 514 | ||
513 | .align 8 | 515 | ENTRY(__serpent_enc_blk_4way) |
514 | .global __serpent_enc_blk_4way | ||
515 | .type __serpent_enc_blk_4way,@function; | ||
516 | |||
517 | __serpent_enc_blk_4way: | ||
518 | /* input: | 516 | /* input: |
519 | * arg_ctx(%esp): ctx, CTX | 517 | * arg_ctx(%esp): ctx, CTX |
520 | * arg_dst(%esp): dst | 518 | * arg_dst(%esp): dst |
@@ -566,22 +564,19 @@ __serpent_enc_blk_4way: | |||
566 | movl arg_dst(%esp), %eax; | 564 | movl arg_dst(%esp), %eax; |
567 | 565 | ||
568 | cmpb $0, arg_xor(%esp); | 566 | cmpb $0, arg_xor(%esp); |
569 | jnz __enc_xor4; | 567 | jnz .L__enc_xor4; |
570 | 568 | ||
571 | write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); | 569 | write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); |
572 | 570 | ||
573 | ret; | 571 | ret; |
574 | 572 | ||
575 | __enc_xor4: | 573 | .L__enc_xor4: |
576 | xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); | 574 | xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); |
577 | 575 | ||
578 | ret; | 576 | ret; |
577 | ENDPROC(__serpent_enc_blk_4way) | ||
579 | 578 | ||
580 | .align 8 | 579 | ENTRY(serpent_dec_blk_4way) |
581 | .global serpent_dec_blk_4way | ||
582 | .type serpent_dec_blk_4way,@function; | ||
583 | |||
584 | serpent_dec_blk_4way: | ||
585 | /* input: | 580 | /* input: |
586 | * arg_ctx(%esp): ctx, CTX | 581 | * arg_ctx(%esp): ctx, CTX |
587 | * arg_dst(%esp): dst | 582 | * arg_dst(%esp): dst |
@@ -633,3 +628,4 @@ serpent_dec_blk_4way: | |||
633 | write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); | 628 | write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); |
634 | 629 | ||
635 | ret; | 630 | ret; |
631 | ENDPROC(serpent_dec_blk_4way) | ||
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S index 3ee1ff04d3e9..acc066c7c6b2 100644 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | |||
@@ -24,6 +24,8 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/linkage.h> | ||
28 | |||
27 | .file "serpent-sse2-x86_64-asm_64.S" | 29 | .file "serpent-sse2-x86_64-asm_64.S" |
28 | .text | 30 | .text |
29 | 31 | ||
@@ -632,11 +634,7 @@ | |||
632 | pxor t0, x3; \ | 634 | pxor t0, x3; \ |
633 | movdqu x3, (3*4*4)(out); | 635 | movdqu x3, (3*4*4)(out); |
634 | 636 | ||
635 | .align 8 | 637 | ENTRY(__serpent_enc_blk_8way) |
636 | .global __serpent_enc_blk_8way | ||
637 | .type __serpent_enc_blk_8way,@function; | ||
638 | |||
639 | __serpent_enc_blk_8way: | ||
640 | /* input: | 638 | /* input: |
641 | * %rdi: ctx, CTX | 639 | * %rdi: ctx, CTX |
642 | * %rsi: dst | 640 | * %rsi: dst |
@@ -687,24 +685,21 @@ __serpent_enc_blk_8way: | |||
687 | leaq (4*4*4)(%rsi), %rax; | 685 | leaq (4*4*4)(%rsi), %rax; |
688 | 686 | ||
689 | testb %cl, %cl; | 687 | testb %cl, %cl; |
690 | jnz __enc_xor8; | 688 | jnz .L__enc_xor8; |
691 | 689 | ||
692 | write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); | 690 | write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); |
693 | write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); | 691 | write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); |
694 | 692 | ||
695 | ret; | 693 | ret; |
696 | 694 | ||
697 | __enc_xor8: | 695 | .L__enc_xor8: |
698 | xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); | 696 | xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); |
699 | xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); | 697 | xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); |
700 | 698 | ||
701 | ret; | 699 | ret; |
700 | ENDPROC(__serpent_enc_blk_8way) | ||
702 | 701 | ||
703 | .align 8 | 702 | ENTRY(serpent_dec_blk_8way) |
704 | .global serpent_dec_blk_8way | ||
705 | .type serpent_dec_blk_8way,@function; | ||
706 | |||
707 | serpent_dec_blk_8way: | ||
708 | /* input: | 703 | /* input: |
709 | * %rdi: ctx, CTX | 704 | * %rdi: ctx, CTX |
710 | * %rsi: dst | 705 | * %rsi: dst |
@@ -756,3 +751,4 @@ serpent_dec_blk_8way: | |||
756 | write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); | 751 | write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); |
757 | 752 | ||
758 | ret; | 753 | ret; |
754 | ENDPROC(serpent_dec_blk_8way) | ||