author		Tom Lendacky <thomas.lendacky@amd.com>	2013-11-12 12:46:22 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2013-12-05 08:28:38 -0500
commit		d312359978e91fc43889ac0d386ab617215f802a (patch)
tree		518c2c10fe56abc512777e071200031b69967a61 /drivers/crypto
parent		63b945091a070d8d4275dc0f7699ba22cd5f9435 (diff)
crypto: ccp - crypto API interface to the CCP device driver
These routines provide support for the interface between the crypto API
and the AMD CCP. This includes ensuring that requests associated with a
given tfm on the same cpu are processed in the order received.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-main.c	432
-rw-r--r--	drivers/crypto/ccp/ccp-crypto.h		191
2 files changed, 623 insertions, 0 deletions
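The sketch below is an editorial illustration, not part of the patch: it shows how one of the algorithm implementations added later in this series might hand a request to this interface. The function name is hypothetical and the ccp_cmd setup is elided (its fields come from <linux/ccp.h>, which is outside this diff); only ccp_crypto_enqueue_request() and the ccp_aes_req_ctx layout below come from this commit.

static int ccp_aes_encrypt_sketch(struct ablkcipher_request *req)
{
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	/* ... populate rctx->cmd (engine, key, iv, src/dst scatterlists)
	 * using the definitions from <linux/ccp.h> ...
	 */

	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}

The return value follows the asynchronous crypto API convention: -EINPROGRESS when the request was accepted, -EBUSY when it was backlogged, with the final status delivered later through the request's completion callback.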
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
new file mode 100644
index 000000000000..2636f044789d
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -0,0 +1,432 @@
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");


/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm on the same CPU must be returned
 * in the order received. With multiple queues available, the CCP can
 * process more than one cmd at a time. Therefore we must maintain
 * a cmd list to ensure the proper ordering of requests on a given tfm/cpu
 * combination.
 */
struct ccp_crypto_cpu_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN	50

struct ccp_crypto_percpu_queue {
	struct ccp_crypto_cpu_queue __percpu *cpu_queue;
};
static struct ccp_crypto_percpu_queue req_queue;
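
/* Illustrative example (an editorial note, not in the original patch):
 * if tfm A submits cmds A1 and A2 on CPU 3 while tfm B submits B1 on the
 * same CPU, A1 and B1 may be handed to different CCP queues and complete
 * in any order, but A2 is held on CPU 3's cmd list until A1 completes, so
 * A's completions are delivered in submission order.
 */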

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;

	int cpu;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};


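/* Editorial note (not in the original patch): -EINPROGRESS and -EBUSY are
 * the normal asynchronous return codes of the crypto API (cmd accepted,
 * cmd backlogged), so the helper below treats them as success.
 */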
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

/*
 * ccp_crypto_cmd_complete must be called while running on the appropriate
 * cpu and the caller must have done a get_cpu to disable preemption
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	struct ccp_crypto_cmd *held = NULL, *tmp;

	*backlog = NULL;

	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 * Because cmds can be executed from any point in the cmd list,
	 * special precautions have to be taken when handling the backlog.
	 */
	if (cpu_queue->backlog != &cpu_queue->cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (cpu_queue->backlog == &crypto_cmd->entry)
			cpu_queue->backlog = crypto_cmd->entry.next;

		*backlog = container_of(cpu_queue->backlog,
					struct ccp_crypto_cmd, entry);
		cpu_queue->backlog = cpu_queue->backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (cpu_queue->backlog == &crypto_cmd->entry)
			cpu_queue->backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	cpu_queue->cmd_count--;
	list_del(&crypto_cmd->entry);

	return held;
}

static void ccp_crypto_complete_on_cpu(struct work_struct *work)
{
	struct ccp_crypto_cpu *cpu_work =
		container_of(work, struct ccp_crypto_cpu, work);
	struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int cpu, ret;

	cpu = get_cpu();

	if (cpu_work->err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		goto e_cpu;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = cpu_work->err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);

e_cpu:
	put_cpu();

	complete(&cpu_work->completion);
}

static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cpu cpu_work;

	INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
	init_completion(&cpu_work.completion);
	cpu_work.crypto_cmd = crypto_cmd;
	cpu_work.err = err;

	schedule_work_on(crypto_cmd->cpu, &cpu_work.work);

	/* Keep the completion call synchronous */
	wait_for_completion(&cpu_work.completion);
}

static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	struct ccp_crypto_cmd *active = NULL, *tmp;
	int cpu, ret;

	cpu = get_cpu();
	crypto_cmd->cpu = cpu;

	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

	/* Check if the cmd can/should be queued */
	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_cpu;
	}

	/* Look for an entry with the same tfm. If there is a cmd
	 * with the same tfm in the list for this cpu then the current
	 * cmd cannot be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_cpu;
	}

	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (cpu_queue->backlog == &cpu_queue->cmds)
			cpu_queue->backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	cpu_queue->cmd_count++;
	list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);

e_cpu:
	put_cpu();

	return ret;
}

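/* Editorial note (not in the original patch): per the code above, the
 * enqueue result is -EINPROGRESS when the cmd was submitted to the CCP or
 * is being held behind an earlier cmd for the same tfm, and -EBUSY when the
 * per-cpu queue is full (the cmd is backlogged if CCP_CMD_MAY_BACKLOG is
 * set, otherwise it is rejected without being queued); any other value is
 * an error passed back from ccp_enqueue_cmd().
 */
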
/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;
	int ret;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
	if (!ccp_crypto_success(ret))
		kfree(crypto_cmd);

	return ret;
}
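
/* Editorial note (not in the original patch): when a request is backlogged
 * (-EBUSY), the completion path above first reports -EINPROGRESS through the
 * request's callback once the cmd actually starts, and only then delivers
 * the final status - the standard crypto API backlog protocol.
 */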

struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	BUG_ON(!sg);

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	BUG_ON(sg_add);

	return sg_last;
}
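
/* Illustrative usage (an editorial assumption based on how the hash
 * implementations later in this series are expected to build their data
 * lists, not code from this patch): the caller allocates a large enough
 * zeroed table first, then appends each piece in order, e.g.:
 *
 *	sg_alloc_table(&rctx->data_sg, sg_count, gfp);
 *	ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
 *	ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
 */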

static int ccp_register_algs(void)
{
	int ret;

	ret = ccp_register_aes_algs(&cipher_algs);
	if (ret)
		return ret;

	ret = ccp_register_aes_cmac_algs(&hash_algs);
	if (ret)
		return ret;

	ret = ccp_register_aes_xts_algs(&cipher_algs);
	if (ret)
		return ret;

	ret = ccp_register_sha_algs(&hash_algs);
	if (ret)
		return ret;

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}
}

static int ccp_init_queues(void)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	int cpu;

	req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
	if (!req_queue.cpu_queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
		INIT_LIST_HEAD(&cpu_queue->cmds);
		cpu_queue->backlog = &cpu_queue->cmds;
		cpu_queue->cmd_count = 0;
	}

	return 0;
}

static void ccp_fini_queue(void)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
		BUG_ON(!list_empty(&cpu_queue->cmds));
	}
	free_percpu(req_queue.cpu_queue);
}

static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_init_queues();
	if (ret)
		return ret;

	ret = ccp_register_algs();
	if (ret) {
		ccp_unregister_algs();
		ccp_fini_queue();
	}

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
	ccp_fini_queue();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
new file mode 100644
index 000000000000..45f17c394e4a
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -0,0 +1,191 @@
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __CCP_CRYPTO_H__
#define __CCP_CRYPTO_H__


#include <linux/list.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/ccp.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>


#define CCP_CRA_PRIORITY	300

struct ccp_crypto_ablkcipher_alg {
	struct list_head entry;

	u32 mode;

	struct crypto_alg alg;
};

struct ccp_crypto_ahash_alg {
	struct list_head entry;

	const u32 *init;
	u32 type;
	u32 mode;

	/* Child algorithm used for HMAC, CMAC, etc */
	char child_alg[CRYPTO_MAX_ALG_NAME];

	struct ahash_alg alg;
};

static inline struct ccp_crypto_ablkcipher_alg *
	ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct ccp_crypto_ablkcipher_alg, alg);
}

static inline struct ccp_crypto_ahash_alg *
	ccp_crypto_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
}


/***** AES related defines *****/
struct ccp_aes_ctx {
	/* Fallback cipher for XTS with unsupported unit sizes */
	struct crypto_ablkcipher *tfm_ablkcipher;

	/* Cipher used to generate CMAC K1/K2 keys */
	struct crypto_cipher *tfm_cipher;

	enum ccp_engine engine;
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;

	struct scatterlist key_sg;
	unsigned int key_len;
	u8 key[AES_MAX_KEY_SIZE];

	u8 nonce[CTR_RFC3686_NONCE_SIZE];

	/* CMAC key structures */
	struct scatterlist k1_sg;
	struct scatterlist k2_sg;
	unsigned int kn_len;
	u8 k1[AES_BLOCK_SIZE];
	u8 k2[AES_BLOCK_SIZE];
};

struct ccp_aes_req_ctx {
	struct scatterlist iv_sg;
	u8 iv[AES_BLOCK_SIZE];

	/* Fields used for RFC3686 requests */
	u8 *rfc3686_info;
	u8 rfc3686_iv[AES_BLOCK_SIZE];

	struct ccp_cmd cmd;
};

struct ccp_aes_cmac_req_ctx {
	unsigned int null_msg;
	unsigned int final;

	unsigned int hash_cnt;
	unsigned int hash_rem;

	struct sg_table data_sg;

	struct scatterlist iv_sg;
	u8 iv[AES_BLOCK_SIZE];

	struct scatterlist buf_sg;
	unsigned int buf_count;
	u8 buf[AES_BLOCK_SIZE];

	struct scatterlist pad_sg;
	unsigned int pad_count;
	u8 pad[AES_BLOCK_SIZE];

	struct ccp_cmd cmd;
};

/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

struct ccp_sha_ctx {
	unsigned int key_len;
	u8 key[MAX_SHA_BLOCK_SIZE];
	u8 ipad[MAX_SHA_BLOCK_SIZE];
	u8 opad[MAX_SHA_BLOCK_SIZE];
	struct crypto_ahash *hmac_tfm;
};

struct ccp_sha_req_ctx {
	enum ccp_sha_type type;

	u64 msg_bits;

	unsigned int first;
	unsigned int final;

	unsigned int hash_cnt;
	unsigned int hash_rem;

	struct sg_table data_sg;

	struct scatterlist ctx_sg;
	u8 ctx[MAX_SHA_CONTEXT_SIZE];

	struct scatterlist buf_sg;
	unsigned int buf_count;
	u8 buf[MAX_SHA_BLOCK_SIZE];

	/* HMAC support field */
	struct scatterlist pad_sg;

	/* CCP driver command */
	struct ccp_cmd cmd;
};

/***** Common Context Structure *****/
struct ccp_ctx {
	int (*complete)(struct crypto_async_request *req, int ret);

	union {
		struct ccp_aes_ctx aes;
		struct ccp_sha_ctx sha;
	} u;
};
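
/* Editorial note (not in the original patch): each algorithm implementation
 * is expected to embed its state in this structure (reached through
 * crypto_tfm_ctx()) and point ->complete at its post-processing routine;
 * ccp-crypto-main.c invokes that routine before calling the crypto API
 * completion callback.
 */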

int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd);
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add);

int ccp_register_aes_algs(struct list_head *head);
int ccp_register_aes_cmac_algs(struct list_head *head);
int ccp_register_aes_xts_algs(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);

#endif