path: root/crypto/cipher.c
author     Herbert Xu <herbert@gondor.apana.org.au>  2005-07-06 16:52:09 -0400
committer  David S. Miller <davem@davemloft.net>     2005-07-06 16:52:09 -0400
commit     95477377995aefa2ec1654a9a3777bd57ea99146 (patch)
tree       7aa4d6173de13c81c2fa0e4d2f9e0de22e141b6a /crypto/cipher.c
parent     40725181b74be6b0e3bdc8c05bd1e0b9873ec5cc (diff)
[CRYPTO] Add alignmask for low-level cipher implementations
The VIA Padlock device requires the input and output buffers to be
aligned on 16-byte boundaries.  This patch adds the alignmask attribute
for low-level cipher implementations to indicate their alignment
requirements.

The mid-level crypt() function will copy the input/output buffers if
they are not aligned correctly before they are passed to the low-level
implementation.

Strictly speaking, some of the software implementations require the
buffers to be aligned on 4-byte boundaries as they do 32-bit loads.
However, it is not clear whether it is better to copy the buffers or
pay the penalty for unaligned loads/stores.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
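The fix-up in crypt_slow() below relies on a standard over-allocate-and-round
idiom: reserve alignmask extra bytes and round the start pointer up to the
next aligned address. Here is a minimal stand-alone sketch of that idiom; the
ALIGN() macro is written in the spirit of the kernel's, and the mask and
block-size values are illustrative, not taken from the patch.

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two);
 * written in the spirit of the kernel's ALIGN() macro. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        const unsigned int alignmask = 15; /* 16-byte alignment, as Padlock needs */
        const unsigned int bsize = 16;     /* illustrative block size */
        unsigned char buffer[16 * 2 + 15]; /* bsize * 2 + alignmask: worst case */
        unsigned char *src = (unsigned char *)ALIGN((unsigned long)buffer,
                                                    alignmask + 1);
        unsigned char *dst = src + bsize;

        /* src is now 16-byte aligned and src/dst both fit inside buffer. */
        printf("buffer=%p src=%p dst=%p\n", (void *)buffer, (void *)src,
               (void *)dst);
        return 0;
}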
Diffstat (limited to 'crypto/cipher.c')
-rw-r--r--  crypto/cipher.c  43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 54c4a560070d..85eb12f8e564 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -41,8 +41,10 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
                                struct scatter_walk *in,
                                struct scatter_walk *out, unsigned int bsize)
 {
-        u8 src[bsize];
-        u8 dst[bsize];
+        unsigned int alignmask = desc->tfm->__crt_alg->cra_alignmask;
+        u8 buffer[bsize * 2 + alignmask];
+        u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+        u8 *dst = src + bsize;
         unsigned int n;
 
         n = scatterwalk_copychunks(src, in, bsize, 0);
@@ -59,15 +61,24 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
 static inline unsigned int crypt_fast(const struct cipher_desc *desc,
                                       struct scatter_walk *in,
                                       struct scatter_walk *out,
-                                      unsigned int nbytes)
+                                      unsigned int nbytes, u8 *tmp)
 {
         u8 *src, *dst;
 
         src = in->data;
         dst = scatterwalk_samebuf(in, out) ? src : out->data;
 
+        if (tmp) {
+                memcpy(tmp, in->data, nbytes);
+                src = tmp;
+                dst = tmp;
+        }
+
         nbytes = desc->prfn(desc, dst, src, nbytes);
 
+        if (tmp)
+                memcpy(out->data, tmp, nbytes);
+
         scatterwalk_advance(in, nbytes);
         scatterwalk_advance(out, nbytes);
 
@@ -87,6 +98,8 @@ static int crypt(const struct cipher_desc *desc,
         struct scatter_walk walk_in, walk_out;
         struct crypto_tfm *tfm = desc->tfm;
         const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
+        unsigned int alignmask = tfm->__crt_alg->cra_alignmask;
+        unsigned long buffer = 0;
 
         if (!nbytes)
                 return 0;
@@ -100,16 +113,27 @@ static int crypt(const struct cipher_desc *desc,
         scatterwalk_start(&walk_out, dst);
 
         for(;;) {
-                unsigned int n;
+                unsigned int n = nbytes;
+                u8 *tmp = NULL;
+
+                if (!scatterwalk_aligned(&walk_in, alignmask) ||
+                    !scatterwalk_aligned(&walk_out, alignmask)) {
+                        if (!buffer) {
+                                buffer = __get_free_page(GFP_ATOMIC);
+                                if (!buffer)
+                                        n = 0;
+                        }
+                        tmp = (u8 *)buffer;
+                }
 
                 scatterwalk_map(&walk_in, 0);
                 scatterwalk_map(&walk_out, 1);
 
-                n = scatterwalk_clamp(&walk_in, nbytes);
+                n = scatterwalk_clamp(&walk_in, n);
                 n = scatterwalk_clamp(&walk_out, n);
 
                 if (likely(n >= bsize))
-                        n = crypt_fast(desc, &walk_in, &walk_out, n);
+                        n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
                 else
                         n = crypt_slow(desc, &walk_in, &walk_out, bsize);
 
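A note on the hunk above: the scratch page is allocated lazily with GFP_ATOMIC
the first time a misaligned chunk is seen, and if the allocation fails n is
forced to 0, so the n >= bsize test fails and the loop degrades to
crypt_slow(), which needs no extra page. A rough user-space analogue of this
lazy-allocate-with-fallback pattern (all names here are ours, assuming a
4096-byte page):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned char *scratch = NULL;    /* analogue of 'buffer' in crypt() */
        int misaligned[] = { 0, 1, 1, 0 };

        for (int i = 0; i < 4; i++) {
                unsigned char *tmp = NULL;

                if (misaligned[i]) {
                        if (!scratch)     /* allocate once, on first need */
                                scratch = malloc(4096);
                        tmp = scratch;    /* stays NULL if malloc failed */
                }

                if (tmp)
                        printf("chunk %d: bounce through scratch page\n", i);
                else if (misaligned[i])
                        printf("chunk %d: no memory, degrade to slow path\n", i);
                else
                        printf("chunk %d: process in place\n", i);
        }

        free(scratch);  /* analogue of the final free_page(buffer) */
        return 0;
}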
@@ -119,10 +143,15 @@ static int crypt(const struct cipher_desc *desc,
                 scatterwalk_done(&walk_out, 1, nbytes);
 
                 if (!nbytes)
-                        return 0;
+                        break;
 
                 crypto_yield(tfm);
         }
+
+        if (buffer)
+                free_page(buffer);
+
+        return 0;
 }
 
 static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
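For reference, the aligned/misaligned decision that selects the tmp bounce
buffer reduces to a mask test of the low address bits, in the spirit of
scatterwalk_aligned(). A self-contained illustration follows; the helper name
and values are ours, not from the patch.

#include <stdio.h>

/* An address satisfies the requirement iff the low bits covered by
 * alignmask are all zero (alignmask = alignment - 1, a power of two
 * minus one). */
static int is_aligned(const void *p, unsigned long alignmask)
{
        return ((unsigned long)p & alignmask) == 0;
}

int main(void)
{
        unsigned char buf[32];
        unsigned long alignmask = 15;   /* 16-byte requirement, as for Padlock */

        printf("%p -> %d\n", (void *)buf, is_aligned(buf, alignmask));
        printf("%p -> %d\n", (void *)(buf + 1), is_aligned(buf + 1, alignmask));
        return 0;
}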