author	Vivek Goyal <vgoyal@redhat.com>	2014-08-08 17:25:59 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-08 18:57:32 -0400
commit	daeba0641a707626f3674db71016f333edf64395 (patch)
tree	c51b9bb9e7ca4ae6638f5bf0faf7115ddee8d3f6 /arch/x86/purgatory
parent	cb1052581e2bddd6096544f3f944f4e7fdad4c7f (diff)
purgatory/sha256: provide implementation of sha256 in purgatory context
The next two patches provide code for purgatory. This is code that does not link against the kernel and runs stand-alone between two kernels. One of its primary purposes is to verify the digest of the newly loaded kernel and make sure it matches the digest computed at kernel load time.

We use sha256 for calculating the digest of the kexec segments. Purgatory can't use the standard crypto API, as that API is not available in the purgatory context. Hence, I have copied code from crypto/sha256_generic.c and compiled it with the purgatory code so that it can be used there. I could not #include the sha256_generic.c file here, as some of the function signatures required a little tweaking: the original functions work with the crypto API, but these ones don't. So instead of doing an #include of sha256_generic.c, I just copied the relevant portions of code into arch/x86/purgatory/sha256.c. Now we shouldn't have to touch this code at all. Do let me know if there are better ways to handle it.

This patch does not enable compiling of this code; that happens in the next patch. I wanted to highlight this change in a separate patch for easy review.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Matthew Garrett <mjg59@srcf.ucam.org>
Cc: Greg Kroah-Hartman <greg@kroah.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: WANG Chao <chaowang@redhat.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
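For context, here is a rough sketch of how purgatory code could drive this API to check the loaded segments. It is illustrative only: the region table, the expected-digest array, and the function name below are assumptions for the example, not part of this patch (the actual verification code lands in the next patch).

/*
 * Illustrative sketch only -- not from this patch. The sha_regions table
 * and the expected sha256_digest are assumed to be filled in by the kexec
 * loader at image-load time.
 */
#include "sha256.h"
#include "../boot/string.h"	/* memcmp, as used elsewhere in purgatory */

struct sha_region {
	unsigned long start;
	unsigned long len;
};

static struct sha_region sha_regions[16];
static u8 sha256_digest[SHA256_DIGEST_SIZE];

static int verify_sha256_digest(void)
{
	struct sha_region *ptr, *end;
	struct sha256_state sctx;
	u8 digest[SHA256_DIGEST_SIZE];

	sha256_init(&sctx);
	end = sha_regions + sizeof(sha_regions) / sizeof(sha_regions[0]);

	/* Hash every loaded segment in order, then produce the digest. */
	for (ptr = sha_regions; ptr < end; ptr++)
		sha256_update(&sctx, (u8 *)ptr->start, ptr->len);
	sha256_final(&sctx, digest);

	/* Non-zero means the loaded kernel image was modified. */
	return memcmp(digest, sha256_digest, sizeof(digest)) ? 1 : 0;
}

The init/update/final split is kept identical to the crypto API version so the copied code only needed signature tweaks and can be fed one kexec segment at a time.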
Diffstat (limited to 'arch/x86/purgatory')
-rw-r--r--	arch/x86/purgatory/sha256.c	283
-rw-r--r--	arch/x86/purgatory/sha256.h	22
2 files changed, 305 insertions, 0 deletions
diff --git a/arch/x86/purgatory/sha256.c b/arch/x86/purgatory/sha256.c
new file mode 100644
index 000000000000..548ca675a14a
--- /dev/null
+++ b/arch/x86/purgatory/sha256.c
@@ -0,0 +1,283 @@
/*
 * SHA-256, as specified in
 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
 *
 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2014 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/bitops.h>
#include <asm/byteorder.h>
#include "sha256.h"
#include "../boot/string.h"

static inline u32 Ch(u32 x, u32 y, u32 z)
{
	return z ^ (x & (y ^ z));
}

static inline u32 Maj(u32 x, u32 y, u32 z)
{
	return (x & y) | (z & (x | y));
}

#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))

static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
	W[I] = __be32_to_cpu(((__be32 *)(input))[I]);
}

static inline void BLEND_OP(int I, u32 *W)
{
	W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
}

static void sha256_transform(u32 *state, const u8 *input)
{
	u32 a, b, c, d, e, f, g, h, t1, t2;
	u32 W[64];
	int i;

	/* load the input */
	for (i = 0; i < 16; i++)
		LOAD_OP(i, W, input);

	/* now blend */
	for (i = 16; i < 64; i++)
		BLEND_OP(i, W);

	/* load the state into our registers */
	a = state[0]; b = state[1]; c = state[2]; d = state[3];
	e = state[4]; f = state[5]; g = state[6]; h = state[7];

	/* now iterate */
	t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
	t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
	t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
	t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
	t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
	t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
	t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
	t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
	t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
	t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
	t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
	t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
	t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
	t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
	t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
	t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;

	state[0] += a; state[1] += b; state[2] += c; state[3] += d;
	state[4] += e; state[5] += f; state[6] += g; state[7] += h;

	/* clear any sensitive info... */
	a = b = c = d = e = f = g = h = t1 = t2 = 0;
	memset(W, 0, 64 * sizeof(u32));
}

int sha256_init(struct sha256_state *sctx)
{
	sctx->state[0] = SHA256_H0;
	sctx->state[1] = SHA256_H1;
	sctx->state[2] = SHA256_H2;
	sctx->state[3] = SHA256_H3;
	sctx->state[4] = SHA256_H4;
	sctx->state[5] = SHA256_H5;
	sctx->state[6] = SHA256_H6;
	sctx->state[7] = SHA256_H7;
	sctx->count = 0;

	return 0;
}

int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
	unsigned int partial, done;
	const u8 *src;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;

	if ((partial + len) > 63) {
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data, done + 64);
			src = sctx->buf;
		}

		do {
			sha256_transform(sctx->state, src);
			done += 64;
			src = data + done;
		} while (done + 63 < len);

		partial = 0;
	}
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

int sha256_final(struct sha256_state *sctx, u8 *out)
{
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	unsigned int index, pad_len;
	int i;
	static const u8 padding[64] = { 0x80, };

	/* Save number of bits */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64. */
	index = sctx->count & 0x3f;
	pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
	sha256_update(sctx, padding, pad_len);

	/* Append length (before padding) */
	sha256_update(sctx, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Zeroize sensitive information. */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h
new file mode 100644
index 000000000000..bd15a4127735
--- /dev/null
+++ b/arch/x86/purgatory/sha256.h
@@ -0,0 +1,22 @@
/*
 * Copyright (C) 2014 Red Hat Inc.
 *
 * Author: Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#ifndef SHA256_H
#define SHA256_H


#include <linux/types.h>
#include <crypto/sha.h>

extern int sha256_init(struct sha256_state *sctx);
extern int sha256_update(struct sha256_state *sctx, const u8 *input,
			 unsigned int length);
extern int sha256_final(struct sha256_state *sctx, u8 *hash);

#endif /* SHA256_H */