author     Jan Glauber <jan.glauber@de.ibm.com>          2007-04-27 10:01:54 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2007-04-27 10:01:46 -0400
commit     131a395c18af43d824841642038e5cc0d48f0bd2 (patch)
tree       c3fec61634deee0589e3a0fa3aec0a63803f3815
parent     6d4740c89c187ee8f5ac7355c4eeffda26493d1f (diff)

[S390] crypto: cleanup.

Cleanup code and remove obsolete documentation.

Signed-off-by: Jan Glauber <jan.glauber@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  Documentation/s390/crypto/crypto-API.txt |  83
-rw-r--r--  arch/s390/crypto/sha1_s390.c             | 129
-rw-r--r--  arch/s390/crypto/sha256_s390.c           |  38
3 files changed, 81 insertions(+), 169 deletions(-)
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
deleted file mode 100644
index 71ae6ca9f2c2..000000000000
--- a/Documentation/s390/crypto/crypto-API.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-crypto-API support for z990 Message Security Assist (MSA) instructions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-AUTHOR: Thomas Spatzier (tspat@de.ibm.com)
-
-
-1. Introduction crypto-API
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-See Documentation/crypto/api-intro.txt for an introduction/description of the
-kernel crypto API.
-According to api-intro.txt support for z990 crypto instructions has been added
-in the algorithm api layer of the crypto API. Several files containing z990
-optimized implementations of crypto algorithms are placed in the
-arch/s390/crypto directory.
-
-
-2. Probing for availability of MSA
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It should be possible to use Kernels with the z990 crypto implementations both
-on machines with MSA available and on those without MSA (pre z990 or z990
-without MSA). Therefore a simple probing mechanism has been implemented:
-In the init function of each crypto module the availability of MSA and of the
-respective crypto algorithm in particular will be tested. If the algorithm is
-available the module will load and register its algorithm with the crypto API.
-
-If the respective crypto algorithm is not available, the init function will
-return -ENOSYS. In that case a fallback to the standard software implementation
-of the crypto algorithm must be taken ( -> the standard crypto modules are
-also built when compiling the kernel).
-
-
-3. Ensuring z990 crypto module preference
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If z990 crypto instructions are available the optimized modules should be
-preferred instead of standard modules.
-
-3.1. compiled-in modules
-~~~~~~~~~~~~~~~~~~~~~~~~
-For compiled-in modules it has to be ensured that the z990 modules are linked
-before the standard crypto modules. Then, on system startup the init functions
-of z990 crypto modules will be called first and query for availability of z990
-crypto instructions. If instruction is available, the z990 module will register
-its crypto algorithm implementation -> the load of the standard module will fail
-since the algorithm is already registered.
-If z990 crypto instruction is not available the load of the z990 module will
-fail -> the standard module will load and register its algorithm.
-
-3.2. dynamic modules
-~~~~~~~~~~~~~~~~~~~~
-A system administrator has to take care of giving preference to z990 crypto
-modules. If MSA is available appropriate lines have to be added to
-/etc/modprobe.conf.
-
-Example: z990 crypto instruction for SHA1 algorithm is available
-
-	add the following line to /etc/modprobe.conf (assuming the
-	z990 crypto modules for SHA1 is called sha1_z990):
-
-	alias sha1 sha1_z990
-
-	-> when the sha1 algorithm is requested through the crypto API
-	(which has a module autoloader) the z990 module will be loaded.
-
-TBD: a userspace module probing mechanism
-	something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
-	-> try module sha1_z990, if it fails to load standard module sha1
-	the 'probe' statement is currently not supported in modprobe.conf
-
-
-4. Currently implemented z990 crypto algorithms
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The following crypto algorithms with z990 MSA support are currently implemented.
-The name of each algorithm under which it is registered in crypto API and the
-name of the respective module is given in square brackets.
-
-- SHA1 Digest Algorithm [sha1 -> sha1_z990]
-- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
-
-In order to load, for example, the sha1_z990 module when the sha1 algorithm is
-requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
-
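Section 2 of the removed document describes the probing pattern that the module init routines still follow. A minimal sketch of that pattern, assuming the crypt_s390_func_available() helper from arch/s390/crypto/crypt_s390.h; the function name and error code here are illustrative, not the literal kernel source:

static int __init sha1_s390_init(void)
{
	/* probe: is the SHA-1 KIMD function available on this machine? */
	if (!crypt_s390_func_available(KIMD_SHA_1))
		return -ENOSYS;	/* fall back to the generic sha1 module */

	/* MSA is available: register the hardware-backed algorithm */
	return crypto_register_alg(&alg);
}
module_init(sha1_s390_init);

The .cra_priority = CRYPT_S390_PRIORITY field visible in the sha1 diff below is what makes the crypto API prefer this implementation over the generic C one once both are registered.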
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 969639f31977..af4460ec381f 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -25,99 +25,100 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/mm.h>
 #include <linux/crypto.h>
-#include <asm/scatterlist.h>
-#include <asm/byteorder.h>
+
 #include "crypt_s390.h"
 
 #define SHA1_DIGEST_SIZE	20
 #define SHA1_BLOCK_SIZE		64
 
-struct crypt_s390_sha1_ctx {
-	u64 count;
+struct s390_sha1_ctx {
+	u64 count;		/* message length */
 	u32 state[5];
-	u32 buf_len;
-	u8 buffer[2 * SHA1_BLOCK_SIZE];
+	u8 buf[2 * SHA1_BLOCK_SIZE];
 };
 
 static void sha1_init(struct crypto_tfm *tfm)
 {
-	struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	ctx->state[0] = 0x67452301;
-	ctx->state[1] = 0xEFCDAB89;
-	ctx->state[2] = 0x98BADCFE;
-	ctx->state[3] = 0x10325476;
-	ctx->state[4] = 0xC3D2E1F0;
-
-	ctx->count = 0;
-	ctx->buf_len = 0;
+	sctx->state[0] = 0x67452301;
+	sctx->state[1] = 0xEFCDAB89;
+	sctx->state[2] = 0x98BADCFE;
+	sctx->state[3] = 0x10325476;
+	sctx->state[4] = 0xC3D2E1F0;
+	sctx->count = 0;
 }
 
 static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
 			unsigned int len)
 {
-	struct crypt_s390_sha1_ctx *sctx;
-	long imd_len;
-
-	sctx = crypto_tfm_ctx(tfm);
-	sctx->count += len * 8; /* message bit length */
-
-	/* anything in buffer yet? -> must be completed */
-	if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
-		/* complete full block and hash */
-		memcpy(sctx->buffer + sctx->buf_len, data,
-		       SHA1_BLOCK_SIZE - sctx->buf_len);
-		crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
-				SHA1_BLOCK_SIZE);
-		data += SHA1_BLOCK_SIZE - sctx->buf_len;
-		len -= SHA1_BLOCK_SIZE - sctx->buf_len;
-		sctx->buf_len = 0;
+	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+	unsigned int index;
+	int ret;
+
+	/* how much is already in the buffer? */
+	index = sctx->count & 0x3f;
+
+	sctx->count += len;
+
+	if (index + len < SHA1_BLOCK_SIZE)
+		goto store;
+
+	/* process one stored block */
+	if (index) {
+		memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
+		ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
+				      SHA1_BLOCK_SIZE);
+		BUG_ON(ret != SHA1_BLOCK_SIZE);
+		data += SHA1_BLOCK_SIZE - index;
+		len -= SHA1_BLOCK_SIZE - index;
 	}
 
-	/* rest of data contains full blocks? */
-	imd_len = len & ~0x3ful;
-	if (imd_len) {
-		crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
-		data += imd_len;
-		len -= imd_len;
+	/* process as many blocks as possible */
+	if (len >= SHA1_BLOCK_SIZE) {
+		ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
+				      len & ~(SHA1_BLOCK_SIZE - 1));
+		BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
+		data += ret;
+		len -= ret;
 	}
-	/* anything left? store in buffer */
-	if (len) {
-		memcpy(sctx->buffer + sctx->buf_len , data, len);
-		sctx->buf_len += len;
-	}
-}
 
+store:
+	/* anything left? */
+	if (len)
+		memcpy(sctx->buf + index , data, len);
+}
 
-static void pad_message(struct crypt_s390_sha1_ctx* sctx)
+/* Add padding and return the message digest. */
+static void sha1_final(struct crypto_tfm *tfm, u8 *out)
 {
-	int index;
+	struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+	u64 bits;
+	unsigned int index, end;
+	int ret;
+
+	/* must perform manual padding */
+	index = sctx->count & 0x3f;
+	end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
 
-	index = sctx->buf_len;
-	sctx->buf_len = (sctx->buf_len < 56) ?
-		SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
 	/* start pad with 1 */
-	sctx->buffer[index] = 0x80;
+	sctx->buf[index] = 0x80;
+
 	/* pad with zeros */
 	index++;
-	memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
-	/* append length */
-	memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
-	       sizeof sctx->count);
-}
+	memset(sctx->buf + index, 0x00, end - index - 8);
 
-/* Add padding and return the message digest. */
-static void sha1_final(struct crypto_tfm *tfm, u8 *out)
-{
-	struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
+	/* append message length */
+	bits = sctx->count * 8;
+	memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
+
+	ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
+	BUG_ON(ret != end);
 
-	/* must perform manual padding */
-	pad_message(sctx);
-	crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
 	/* copy digest to out */
 	memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
+
 	/* wipe context */
 	memset(sctx, 0, sizeof *sctx);
 }
@@ -128,7 +129,7 @@ static struct crypto_alg alg = {
 	.cra_priority	=	CRYPT_S390_PRIORITY,
 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize	=	SHA1_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct crypt_s390_sha1_ctx),
+	.cra_ctxsize	=	sizeof(struct s390_sha1_ctx),
 	.cra_module	=	THIS_MODULE,
 	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
 	.cra_u		=	{ .digest = {
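A note on the rewritten sha1_update() above: dropping the separate buf_len field works because sctx->count now holds the message length in bytes, so its low six bits (count & 0x3f) are exactly the number of bytes pending in buf. A standalone sketch of that bookkeeping with hypothetical values, plain C rather than kernel code:

#include <stdio.h>

#define BLOCK_SIZE 64

int main(void)
{
	unsigned long long count = 200;	/* bytes hashed before this call */
	unsigned int len = 100;		/* bytes arriving now */

	unsigned int index = count & (BLOCK_SIZE - 1);	/* 200 % 64 = 8 buffered */
	unsigned int fill = BLOCK_SIZE - index;		/* 56 bytes complete a block */
	unsigned int full = (len - fill) & ~(BLOCK_SIZE - 1);	/* whole blocks after that */
	unsigned int rest = len - fill - full;		/* stays in the buffer */

	/* prints: fill=56 full=0 rest=44 */
	printf("fill=%u full=%u rest=%u\n", fill, full, rest);
	return 0;
}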
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 78436c696d37..2ced3330bce0 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -26,7 +26,7 @@
 #define SHA256_BLOCK_SIZE	64
 
 struct s390_sha256_ctx {
-	u64 count;
+	u64 count;		/* message length */
 	u32 state[8];
 	u8 buf[2 * SHA256_BLOCK_SIZE];
 };
@@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
 	int ret;
 
 	/* how much is already in the buffer? */
-	index = sctx->count / 8 & 0x3f;
+	index = sctx->count & 0x3f;
 
-	/* update message bit length */
-	sctx->count += len * 8;
+	sctx->count += len;
 
 	if ((index + len) < SHA256_BLOCK_SIZE)
 		goto store;
@@ -87,12 +86,17 @@ store:
 	memcpy(sctx->buf + index , data, len);
 }
 
-static void pad_message(struct s390_sha256_ctx* sctx)
+/* Add padding and return the message digest */
+static void sha256_final(struct crypto_tfm *tfm, u8 *out)
 {
-	int index, end;
+	struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
+	u64 bits;
+	unsigned int index, end;
+	int ret;
 
-	index = sctx->count / 8 & 0x3f;
-	end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;
+	/* must perform manual padding */
+	index = sctx->count & 0x3f;
+	end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
 
 	/* start pad with 1 */
 	sctx->buf[index] = 0x80;
@@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx)
 	memset(sctx->buf + index, 0x00, end - index - 8);
 
 	/* append message length */
-	memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);
-
-	sctx->count = end * 8;
-}
-
-/* Add padding and return the message digest */
-static void sha256_final(struct crypto_tfm *tfm, u8 *out)
-{
-	struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
-
-	/* must perform manual padding */
-	pad_message(sctx);
-
-	crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
-			sctx->count / 8);
+	bits = sctx->count * 8;
+	memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
+
+	ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
+	BUG_ON(ret != end);
 
 	/* copy digest to out */
 	memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
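Both final() routines now end with the same padding arithmetic: a 0x80 marker byte, zero fill, and the 64-bit message bit length in the last eight bytes of one or two blocks. A standalone sketch of the layout, assuming an empty buffer for the demo; the byte-wise store makes the big-endian length explicit, which the kernel code gets for free from memcpy because s390 is big-endian:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

/* returns how many padded bytes would be fed to the KIMD instruction */
static unsigned int pad(unsigned char *buf, unsigned long long count)
{
	unsigned int index = count & 0x3f;	/* bytes already buffered */
	/* the length needs the last 8 bytes, so index < 56 fits in one block */
	unsigned int end = (index < 56) ? BLOCK_SIZE : 2 * BLOCK_SIZE;
	unsigned long long bits = count * 8;
	int i;

	buf[index++] = 0x80;			/* the mandatory 1 bit */
	memset(buf + index, 0x00, end - index - 8);
	for (i = 0; i < 8; i++)			/* big-endian bit length */
		buf[end - 1 - i] = (unsigned char)(bits >> (8 * i));
	return end;
}

int main(void)
{
	unsigned char buf[2 * BLOCK_SIZE] = { 0 };

	printf("end=%u\n", pad(buf, 55));	/* 55 < 56: one block, end=64 */
	printf("end=%u\n", pad(buf, 56));	/* marker + length overflow: end=128 */
	return 0;
}

The refactoring also removes a side channel: the old sha256 pad_message() smuggled the padded length back through sctx->count (the deleted sctx->count = end * 8 line), whereas returning end and using a local bits variable keeps the message length intact until the context is wiped.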