author    Tom Lendacky <thomas.lendacky@amd.com>    2014-01-24 17:18:08 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>  2014-02-08 20:59:24 -0500
commit    bc3854476f36d816d52cd8d41d1ecab2f8b6cdcf (patch)
tree      4554073b75d21dac505e5992986b43ca5cc00a28 /drivers/crypto
parent    c11baa02c5d6ea06362fa61da070af34b7706c83 (diff)
crypto: ccp - Use a single queue for proper ordering of tfm requests
Move to a single queue to serialize requests within a tfm. When testing
using IPSec with a large number of network connections the per cpu tfm
queuing logic was not working properly.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
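As a rough illustration of the ordering rule described above (not the driver code itself), the following minimal userspace sketch models the single-queue idea: every request goes on one lock-protected list, and a request is handed to the hardware immediately only if no earlier request for the same tfm is still queued. All names here (req_node, tfm_id, dispatch_to_ccp) are hypothetical, and a pthread mutex stands in for the kernel spinlock.

/* Hypothetical userspace model of the single-queue ordering rule:
 * one global list under one lock; a new request is dispatched only
 * when no earlier request for the same tfm is still on the list. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req_node {
        struct req_node *next;
        int tfm_id;            /* stands in for the crypto_tfm pointer */
        int req_id;
};

static struct req_node *queue_head, *queue_tail;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend submission to the CCP hardware. */
static void dispatch_to_ccp(struct req_node *req)
{
        printf("dispatch tfm %d req %d\n", req->tfm_id, req->req_id);
}

/* Enqueue a request; dispatch it now only if no earlier request for
 * the same tfm is already queued. */
static void enqueue(struct req_node *req)
{
        struct req_node *p;
        bool active = false;

        pthread_mutex_lock(&queue_lock);
        for (p = queue_head; p; p = p->next)
                if (p->tfm_id == req->tfm_id)
                        active = true;

        if (queue_tail)
                queue_tail->next = req;
        else
                queue_head = req;
        queue_tail = req;

        if (!active)
                dispatch_to_ccp(req);
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                struct req_node *req = calloc(1, sizeof(*req));

                req->tfm_id = i % 2;   /* two tfms, interleaved requests */
                req->req_id = i;
                enqueue(req);          /* only the first req per tfm dispatches */
        }
        return 0;
}

With two tfms and interleaved requests, only the first request per tfm is dispatched right away; the later ones stay queued until the earlier one completes, which is exactly the per-tfm serialization the patch enforces.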
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 164
1 file changed, 48 insertions(+), 116 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index b3f22b07b5bd..010fded5d46b 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -38,23 +38,20 @@ MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
 
-/* For any tfm, requests for that tfm on the same CPU must be returned
- * in the order received. With multiple queues available, the CCP can
- * process more than one cmd at a time. Therefore we must maintain
- * a cmd list to insure the proper ordering of requests on a given tfm/cpu
- * combination.
+/* For any tfm, requests for that tfm must be returned on the order
+ * received. With multiple queues available, the CCP can process more
+ * than one cmd at a time. Therefore we must maintain a cmd list to insure
+ * the proper ordering of requests on a given tfm.
  */
-struct ccp_crypto_cpu_queue {
+struct ccp_crypto_queue {
 	struct list_head cmds;
 	struct list_head *backlog;
 	unsigned int cmd_count;
 };
-#define CCP_CRYPTO_MAX_QLEN	50
+#define CCP_CRYPTO_MAX_QLEN	100
 
-struct ccp_crypto_percpu_queue {
-	struct ccp_crypto_cpu_queue __percpu *cpu_queue;
-};
-static struct ccp_crypto_percpu_queue req_queue;
+static struct ccp_crypto_queue req_queue;
+static spinlock_t req_queue_lock;
 
 struct ccp_crypto_cmd {
 	struct list_head entry;
@@ -71,8 +68,6 @@ struct ccp_crypto_cmd {
 
 	/* Used for held command processing to determine state */
 	int ret;
-
-	int cpu;
 };
 
 struct ccp_crypto_cpu {
@@ -91,25 +86,21 @@ static inline bool ccp_crypto_success(int err)
 	return true;
 }
 
-/*
- * ccp_crypto_cmd_complete must be called while running on the appropriate
- * cpu and the caller must have done a get_cpu to disable preemption
- */
 static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
 	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
 {
-	struct ccp_crypto_cpu_queue *cpu_queue;
 	struct ccp_crypto_cmd *held = NULL, *tmp;
+	unsigned long flags;
 
 	*backlog = NULL;
 
-	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+	spin_lock_irqsave(&req_queue_lock, flags);
 
 	/* Held cmds will be after the current cmd in the queue so start
 	 * searching for a cmd with a matching tfm for submission.
 	 */
 	tmp = crypto_cmd;
-	list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
+	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
 		if (crypto_cmd->tfm != tmp->tfm)
 			continue;
 		held = tmp;
@@ -120,47 +111,45 @@ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
 	 * Because cmds can be executed from any point in the cmd list
 	 * special precautions have to be taken when handling the backlog.
 	 */
-	if (cpu_queue->backlog != &cpu_queue->cmds) {
+	if (req_queue.backlog != &req_queue.cmds) {
 		/* Skip over this cmd if it is the next backlog cmd */
-		if (cpu_queue->backlog == &crypto_cmd->entry)
-			cpu_queue->backlog = crypto_cmd->entry.next;
+		if (req_queue.backlog == &crypto_cmd->entry)
+			req_queue.backlog = crypto_cmd->entry.next;
 
-		*backlog = container_of(cpu_queue->backlog,
+		*backlog = container_of(req_queue.backlog,
 					struct ccp_crypto_cmd, entry);
-		cpu_queue->backlog = cpu_queue->backlog->next;
+		req_queue.backlog = req_queue.backlog->next;
 
 		/* Skip over this cmd if it is now the next backlog cmd */
-		if (cpu_queue->backlog == &crypto_cmd->entry)
-			cpu_queue->backlog = crypto_cmd->entry.next;
+		if (req_queue.backlog == &crypto_cmd->entry)
+			req_queue.backlog = crypto_cmd->entry.next;
 	}
 
 	/* Remove the cmd entry from the list of cmds */
-	cpu_queue->cmd_count--;
+	req_queue.cmd_count--;
 	list_del(&crypto_cmd->entry);
 
+	spin_unlock_irqrestore(&req_queue_lock, flags);
+
 	return held;
 }
 
-static void ccp_crypto_complete_on_cpu(struct work_struct *work)
+static void ccp_crypto_complete(void *data, int err)
 {
-	struct ccp_crypto_cpu *cpu_work =
-		container_of(work, struct ccp_crypto_cpu, work);
-	struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
+	struct ccp_crypto_cmd *crypto_cmd = data;
 	struct ccp_crypto_cmd *held, *next, *backlog;
 	struct crypto_async_request *req = crypto_cmd->req;
 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	int cpu, ret;
-
-	cpu = get_cpu();
+	int ret;
 
-	if (cpu_work->err == -EINPROGRESS) {
+	if (err == -EINPROGRESS) {
 		/* Only propogate the -EINPROGRESS if necessary */
 		if (crypto_cmd->ret == -EBUSY) {
 			crypto_cmd->ret = -EINPROGRESS;
 			req->complete(req, -EINPROGRESS);
 		}
 
-		goto e_cpu;
+		return;
 	}
 
 	/* Operation has completed - update the queue before invoking
@@ -178,7 +167,7 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work)
 		req->complete(req, -EINPROGRESS);
 
 	/* Completion callbacks */
-	ret = cpu_work->err;
+	ret = err;
 	if (ctx->complete)
 		ret = ctx->complete(req, ret);
 	req->complete(req, ret);
@@ -203,52 +192,28 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work)
 	}
 
 	kfree(crypto_cmd);
-
-e_cpu:
-	put_cpu();
-
-	complete(&cpu_work->completion);
-}
-
-static void ccp_crypto_complete(void *data, int err)
-{
-	struct ccp_crypto_cmd *crypto_cmd = data;
-	struct ccp_crypto_cpu cpu_work;
-
-	INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
-	init_completion(&cpu_work.completion);
-	cpu_work.crypto_cmd = crypto_cmd;
-	cpu_work.err = err;
-
-	schedule_work_on(crypto_cmd->cpu, &cpu_work.work);
-
-	/* Keep the completion call synchronous */
-	wait_for_completion(&cpu_work.completion);
 }
 
 static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 {
-	struct ccp_crypto_cpu_queue *cpu_queue;
 	struct ccp_crypto_cmd *active = NULL, *tmp;
-	int cpu, ret;
-
-	cpu = get_cpu();
-	crypto_cmd->cpu = cpu;
+	unsigned long flags;
+	int ret;
 
-	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
+	spin_lock_irqsave(&req_queue_lock, flags);
 
 	/* Check if the cmd can/should be queued */
-	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
 		ret = -EBUSY;
 		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
-			goto e_cpu;
+			goto e_lock;
 	}
 
 	/* Look for an entry with the same tfm. If there is a cmd
-	 * with the same tfm in the list for this cpu then the current
-	 * cmd cannot be submitted to the CCP yet.
+	 * with the same tfm in the list then the current cmd cannot
+	 * be submitted to the CCP yet.
 	 */
-	list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
+	list_for_each_entry(tmp, &req_queue.cmds, entry) {
 		if (crypto_cmd->tfm != tmp->tfm)
 			continue;
 		active = tmp;
@@ -259,21 +224,21 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 	if (!active) {
 		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
 		if (!ccp_crypto_success(ret))
-			goto e_cpu;
+			goto e_lock;
 	}
 
-	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
 		ret = -EBUSY;
-		if (cpu_queue->backlog == &cpu_queue->cmds)
-			cpu_queue->backlog = &crypto_cmd->entry;
+		if (req_queue.backlog == &req_queue.cmds)
+			req_queue.backlog = &crypto_cmd->entry;
 	}
 	crypto_cmd->ret = ret;
 
-	cpu_queue->cmd_count++;
-	list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);
+	req_queue.cmd_count++;
+	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
 
-e_cpu:
-	put_cpu();
+e_lock:
+	spin_unlock_irqrestore(&req_queue_lock, flags);
 
 	return ret;
 }
@@ -387,50 +352,18 @@ static void ccp_unregister_algs(void)
 	}
 }
 
-static int ccp_init_queues(void)
-{
-	struct ccp_crypto_cpu_queue *cpu_queue;
-	int cpu;
-
-	req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
-	if (!req_queue.cpu_queue)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu) {
-		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
-		INIT_LIST_HEAD(&cpu_queue->cmds);
-		cpu_queue->backlog = &cpu_queue->cmds;
-		cpu_queue->cmd_count = 0;
-	}
-
-	return 0;
-}
-
-static void ccp_fini_queue(void)
-{
-	struct ccp_crypto_cpu_queue *cpu_queue;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
-		BUG_ON(!list_empty(&cpu_queue->cmds));
-	}
-	free_percpu(req_queue.cpu_queue);
-}
-
 static int ccp_crypto_init(void)
 {
 	int ret;
 
-	ret = ccp_init_queues();
-	if (ret)
-		return ret;
+	spin_lock_init(&req_queue_lock);
+	INIT_LIST_HEAD(&req_queue.cmds);
+	req_queue.backlog = &req_queue.cmds;
+	req_queue.cmd_count = 0;
 
 	ret = ccp_register_algs();
-	if (ret) {
+	if (ret)
 		ccp_unregister_algs();
-		ccp_fini_queue();
-	}
 
 	return ret;
 }
@@ -438,7 +371,6 @@ static int ccp_crypto_init(void)
 static void ccp_crypto_exit(void)
 {
 	ccp_unregister_algs();
-	ccp_fini_queue();
 }
 
 module_init(ccp_crypto_init);
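For the completion path rewritten in the hunks above, here is a similarly hypothetical, self-contained sketch (illustrative struct cmd and complete_cmd names; locking and backlog handling omitted for brevity) of what the single-queue version of ccp_crypto_cmd_complete() does: unlink the finished request from the queue and return the next queued request for the same tfm, if any, so the caller can submit it to the hardware.

/* Hypothetical model of the completion side: when a request finishes,
 * remove it from the single queue and return the next queued request
 * for the same tfm (the "held" request), or NULL if there is none. */
#include <stddef.h>
#include <stdio.h>

struct cmd {
        struct cmd *next;
        int tfm_id;
        int req_id;
};

static struct cmd *complete_cmd(struct cmd **head, struct cmd *done)
{
        struct cmd **pp, *held;

        /* Unlink the finished cmd from the list. */
        for (pp = head; *pp; pp = &(*pp)->next) {
                if (*pp == done) {
                        *pp = done->next;
                        break;
                }
        }

        /* Held cmds are queued after the finished cmd, so scan forward
         * for the next cmd that uses the same tfm. */
        for (held = done->next; held; held = held->next)
                if (held->tfm_id == done->tfm_id)
                        break;
        return held;
}

int main(void)
{
        struct cmd c2 = { NULL, 0, 2 };   /* same tfm as c0, still queued */
        struct cmd c1 = { &c2, 1, 1 };    /* different tfm */
        struct cmd c0 = { &c1, 0, 0 };    /* the cmd that just completed */
        struct cmd *head = &c0;
        struct cmd *held = complete_cmd(&head, &c0);

        if (held)
                printf("submit held req %d for tfm %d\n",
                       held->req_id, held->tfm_id);
        return 0;
}

In the driver the same scan is done under req_queue_lock and is followed by the backlog bookkeeping shown in the patch; this sketch only captures the per-tfm hand-off that keeps requests for one tfm strictly ordered.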