/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */
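
/*
 * Sixteen 64-bit blocks are processed per call: the left halves live in
 * RL1..RL4 and the right halves in RR1..RR4, four 32-bit halves per xmm
 * register.  AVX has no gather instruction, so the round function moves
 * each word through general-purpose registers for the s-box lookups.
 */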

.file "cast5-avx-x86_64-asm_64.S"
.text

.extern cast5_s1
.extern cast5_s2
.extern cast5_s3
.extern cast5_s4

/* structure of crypto context */
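/*
 * km: sixteen 32-bit masking keys; kr: sixteen rotation keys, one byte
 * each; rr: non-zero when the reduced 12-round schedule is in use
 * (keys of 80 bits or less, RFC 2144).
 */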
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)

/* s-boxes */
#define s1	cast5_s1
#define s2	cast5_s2
#define s3	cast5_s3
#define s4	cast5_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKRF %xmm10
#define RKRR %xmm11

#define RTMP  %xmm12
#define RMASK %xmm13
#define R32   %xmm14

#define RID1  %rax
#define RID1b %al
#define RID2  %rbx
#define RID2b %bl

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


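/*
 * Scalar lookup for one 32-bit word held in a general-purpose register:
 * each of the four bytes of src indexes one of the s-boxes s1..s4, and
 * the results are combined into dst with op1/op2/op3 (the add/sub/xor
 * pattern of the current round type).  src is shifted in the process.
 */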
#define lookup_32bit(src, dst, op1, op2, op3) \
	movb		src ## bl,     RID1b;    \
	movb		src ## bh,     RID2b;    \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	shrq $16,	src;                     \
	movb		src ## bl,     RID1b;    \
	movb		src ## bh,     RID2b;    \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

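/*
 * Round function f applied to the four blocks held in one register:
 * mix in the masking key with op0 (add/xor/sub depending on the round
 * type), rotate each word left by the rotation key (RKRF/RKRR), then
 * pull the words out to general-purpose registers two at a time and
 * push each one through the scalar s-box lookup above.
 */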
#define F(a, x, op0, op1, op2, op3) \
	op0	a,	RKM,  x;                 \
	vpslld  RKRF,	x,    RTMP;              \
	vpsrld  RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vpshufb	RMASK,	x,    x;                 \
	vmovq		x,    RGI1;              \
	vpsrldq $8,	x,    x;                 \
	vmovq		x,    RGI2;              \
	\
	lookup_32bit(RGI1, RFS1, op1, op2, op3); \
	shrq $16,	RGI1;                    \
	lookup_32bit(RGI1, RFS2, op1, op2, op3); \
	shlq $32,	RFS2;                    \
	orq		RFS1, RFS2;              \
	\
	lookup_32bit(RGI2, RFS1, op1, op2, op3); \
	shrq $16,	RGI2;                    \
	lookup_32bit(RGI2, RFS3, op1, op2, op3); \
	shlq $32,	RFS3;                    \
	orq		RFS1, RFS3;              \
	\
	vmovq		RFS2, x;                 \
	vpinsrq $1,	RFS3, x, x;

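/*
 * The three CAST5 round function types (RFC 2144); they differ only in
 * which of add/sub/xor mixes in the masking key and combines the four
 * s-box outputs.
 */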
#define F1(b, x) F(b, x, vpaddd, xorl, subl, addl)
#define F2(b, x) F(b, x, vpxor,  subl, addl, xorl)
#define F3(b, x) F(b, x, vpsubd, addl, xorl, subl)

#define subround(a, b, x, n, f) \
	F ## f(b, x);  \
	vpxor a, x, a;

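/*
 * One cipher round over all 16 blocks: broadcast masking key km[n] into
 * RKM, load rotation key kr[n] into RKRF, put the complementary shift
 * count (32 - kr[n]) into RKRR, then apply round function type f to each
 * of the four register pairs, xoring the result into the other half.
 */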
#define round(l, r, n, f) \
	vbroadcastss 	(km+(4*n))(CTX), RKM;        \
	vpinsrb $0,	(kr+n)(CTX),     RKRF, RKRF; \
	vpsubq		RKRF,            R32,  RKRR; \
	subround(l ## 1, r ## 1, RX, n, f);          \
	subround(l ## 2, r ## 2, RX, n, f);          \
	subround(l ## 3, r ## 3, RX, n, f);          \
	subround(l ## 4, r ## 4, RX, n, f);


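/*
 * Block (un)packing: inpack_blocks loads four 64-bit blocks, byte-swaps
 * them to native word order and transposes them so that x0 holds the
 * four left halves and x1 the four right halves.  outunpack_blocks
 * reverses this; outunpack_xor_blocks additionally xors the result into
 * the data already present at the destination.
 */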
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t1; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1;

#define inpack_blocks(in, x0, x1, t0, t1) \
	vmovdqu (0*4*4)(in),	x0; \
	vmovdqu (1*4*4)(in),	x1; \
	vpshufb RMASK, x0,	x0; \
	vpshufb RMASK, x1,	x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(out, x0, x1, t0, t1) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb RMASK,	x0, x0;           \
	vpshufb RMASK,	x1, x1;           \
	vmovdqu 	x0, (0*4*4)(out); \
	vmovdqu		x1, (1*4*4)(out);

#define outunpack_xor_blocks(out, x0, x1, t0, t1) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb RMASK,	x0, x0;               \
	vpshufb RMASK,	x1, x1;               \
	vpxor		(0*4*4)(out), x0, x0; \
	vmovdqu 	x0, (0*4*4)(out);     \
	vpxor		(1*4*4)(out), x1, x1; \
	vmovdqu	        x1, (1*4*4)(out);

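/*
 * .Lbswap_mask: vpshufb control that byte-swaps each 32-bit word (CAST5
 * operates on big-endian words).  .L32_mask: the constant 32, used to
 * derive the right-shift count that complements the left-rotate count.
 */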
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.L32_mask:
	.byte 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0

.align 16
.global __cast5_enc_blk_16way
.type   __cast5_enc_blk_16way,@function;

__cast5_enc_blk_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: bool, if true: xor output
	 */

	pushq %rbx;
	pushq %rcx;

	vmovdqu .Lbswap_mask, RMASK;
	vmovdqu .L32_mask, R32;
	vpxor RKRF, RKRF, RKRF;

	inpack_blocks(%rdx, RL1, RR1, RTMP, RX);
	leaq (2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL2, RR2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL3, RR3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL4, RR4, RTMP, RX);

	xorq RID1, RID1;
	xorq RID2, RID2;

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

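	/* keys of 80 bits or less use only 12 rounds (rr set); skip the last four */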
	movb rr(CTX), %al;
	testb %al, %al;
	jnz __skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

__skip_enc:
	popq %rcx;
	popq %rbx;

	testb %cl, %cl;
	jnz __enc_xor16;

	outunpack_blocks(%rsi, RR1, RL1, RTMP, RX);
	leaq (2*4*4)(%rsi), %rax;
	outunpack_blocks(%rax, RR2, RL2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR3, RL3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR4, RL4, RTMP, RX);

	ret;

__enc_xor16:
	outunpack_xor_blocks(%rsi, RR1, RL1, RTMP, RX);
	leaq (2*4*4)(%rsi), %rax;
	outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX);

	ret;

.align 16
.global cast5_dec_blk_16way
.type   cast5_dec_blk_16way,@function;

cast5_dec_blk_16way:
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %rbx;

	vmovdqu .Lbswap_mask, RMASK;
	vmovdqu .L32_mask, R32;
	vpxor RKRF, RKRF, RKRF;

	inpack_blocks(%rdx, RL1, RR1, RTMP, RX);
	leaq (2*4*4)(%rdx), %rax;
	inpack_blocks(%rax, RL2, RR2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL3, RR3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	inpack_blocks(%rax, RL4, RR4, RTMP, RX);

	xorq RID1, RID1;
	xorq RID2, RID2;

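	/* the reduced 12-round schedule never ran rounds 12..15; skip their inverse */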
	movb rr(CTX), %al;
	testb %al, %al;
	jnz __skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

__skip_dec:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	popq %rbx;

	outunpack_blocks(%rsi, RR1, RL1, RTMP, RX);
	leaq (2*4*4)(%rsi), %rax;
	outunpack_blocks(%rax, RR2, RL2, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR3, RL3, RTMP, RX);
	leaq (2*4*4)(%rax), %rax;
	outunpack_blocks(%rax, RR4, RL4, RTMP, RX);

	ret;