 -rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 52
 -rw-r--r--  drivers/crypto/ccp/ccp-dev.h    | 74
 -rw-r--r--  drivers/crypto/ccp/ccp-ops.c    | 52
 3 files changed, 98 insertions(+), 80 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 19eafb85708f..5b0659933b2b 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -20,6 +20,56 @@
 
 #include "ccp-dev.h"
 
+static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+        int start;
+        struct ccp_device *ccp = cmd_q->ccp;
+
+        for (;;) {
+                mutex_lock(&ccp->sb_mutex);
+
+                start = (u32)bitmap_find_next_zero_area(ccp->sb,
+                                                        ccp->sb_count,
+                                                        ccp->sb_start,
+                                                        count, 0);
+                if (start <= ccp->sb_count) {
+                        bitmap_set(ccp->sb, start, count);
+
+                        mutex_unlock(&ccp->sb_mutex);
+                        break;
+                }
+
+                ccp->sb_avail = 0;
+
+                mutex_unlock(&ccp->sb_mutex);
+
+                /* Wait for KSB entries to become available */
+                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+                        return 0;
+        }
+
+        return KSB_START + start;
+}
+
+static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
+                         unsigned int count)
+{
+        struct ccp_device *ccp = cmd_q->ccp;
+
+        if (!start)
+                return;
+
+        mutex_lock(&ccp->sb_mutex);
+
+        bitmap_clear(ccp->sb, start - KSB_START, count);
+
+        ccp->sb_avail = 1;
+
+        mutex_unlock(&ccp->sb_mutex);
+
+        wake_up_interruptible_all(&ccp->sb_queue);
+}
+
 static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
 {
         struct ccp_cmd_queue *cmd_q = op->cmd_q;
@@ -534,6 +584,8 @@ static const struct ccp_actions ccp3_actions = {
         .rsa = ccp_perform_rsa,
         .passthru = ccp_perform_passthru,
         .ecc = ccp_perform_ecc,
+        .sballoc = ccp_alloc_ksb,
+        .sbfree = ccp_free_ksb,
         .init = ccp_init,
         .destroy = ccp_destroy,
         .irqhandler = ccp_irq_handler,
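
Taken together, ccp_alloc_ksb() and ccp_free_ksb() implement a small blocking allocator over the device's storage-block bitmap: the allocator scans for a contiguous free run under sb_mutex, and a caller that finds none clears sb_avail and sleeps on sb_queue until a free sets the flag and wakes all waiters. For readers less familiar with the kernel primitives involved, the following is a minimal userspace analogue of that pattern; a pthread mutex and condition variable stand in for the kernel mutex and wait queue, a 64-bit word stands in for DECLARE_BITMAP(), and every name in it is illustrative rather than part of the driver.

/* Userspace sketch (not driver code) of the blocking bitmap-allocator
 * pattern used by ccp_alloc_ksb()/ccp_free_ksb(). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SB_COUNT 64U

static pthread_mutex_t sb_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sb_queue = PTHREAD_COND_INITIALIZER;
static uint64_t sb_map;                         /* set bit = entry in use */

static uint64_t run_mask(unsigned int start, unsigned int count)
{
        uint64_t bits = (count >= 64) ? ~0ULL : ((1ULL << count) - 1);

        return bits << start;
}

/* Find 'count' contiguous free entries, or -1 if none exist right now. */
static int find_free_run(unsigned int count)
{
        for (unsigned int start = 0; start + count <= SB_COUNT; start++)
                if (!(sb_map & run_mask(start, count)))
                        return (int)start;
        return -1;
}

/* Block until 'count' contiguous entries can be reserved; return start. */
static unsigned int sb_alloc(unsigned int count)
{
        int start;

        pthread_mutex_lock(&sb_mutex);
        while ((start = find_free_run(count)) < 0)
                pthread_cond_wait(&sb_queue, &sb_mutex); /* sleep, as with wait_event */
        sb_map |= run_mask((unsigned int)start, count);  /* like bitmap_set() */
        pthread_mutex_unlock(&sb_mutex);
        return (unsigned int)start;
}

/* Release a previously reserved run and wake every waiter. */
static void sb_free(unsigned int start, unsigned int count)
{
        pthread_mutex_lock(&sb_mutex);
        sb_map &= ~run_mask(start, count);               /* like bitmap_clear() */
        pthread_mutex_unlock(&sb_mutex);
        pthread_cond_broadcast(&sb_queue);  /* like wake_up_interruptible_all() */
}

int main(void)
{
        unsigned int a = sb_alloc(2);
        unsigned int b = sb_alloc(3);

        printf("runs start at %u and %u\n", a, b);
        sb_free(a, 2);
        sb_free(b, 3);
        return 0;
}

One detail the analogue leaves out: the driver waits interruptibly and returns 0 (treated as an allocation failure by callers) if the sleep is broken by a signal.
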
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 1e30568d7c04..4e38a61fbe5d 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -147,30 +147,6 @@
 #define CCP_SB_BYTES 32
 
 struct ccp_op;
-
-/* Structure for computation functions that are device-specific */
-struct ccp_actions {
-        int (*aes)(struct ccp_op *);
-        int (*xts_aes)(struct ccp_op *);
-        int (*sha)(struct ccp_op *);
-        int (*rsa)(struct ccp_op *);
-        int (*passthru)(struct ccp_op *);
-        int (*ecc)(struct ccp_op *);
-        int (*init)(struct ccp_device *);
-        void (*destroy)(struct ccp_device *);
-        irqreturn_t (*irqhandler)(int, void *);
-};
-
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
-        unsigned int version;
-        const struct ccp_actions *perform;
-        const unsigned int bar;
-        const unsigned int offset;
-};
-
-extern struct ccp_vdata ccpv3;
-
 struct ccp_device;
 struct ccp_cmd;
 
@@ -306,13 +282,22 @@ struct ccp_device {
          */
         atomic_t current_id ____cacheline_aligned;
 
-        /* The CCP uses key storage blocks (KSB) to maintain context for certain
-         * operations. To prevent multiple cmds from using the same KSB range
-         * a command queue reserves a KSB range for the duration of the cmd.
-         * Each queue, will however, reserve 2 KSB blocks for operations that
-         * only require single KSB entries (eg. AES context/iv and key) in order
-         * to avoid allocation contention. This will reserve at most 10 KSB
-         * entries, leaving 40 KSB entries available for dynamic allocation.
+        /* The v3 CCP uses key storage blocks (SB) to maintain context for
+         * certain operations. To prevent multiple cmds from using the same
+         * SB range, a command queue reserves an SB range for the duration of
+         * the cmd. Each queue will, however, reserve 2 SB blocks for
+         * operations that only require single SB entries (eg. AES context/iv
+         * and key) in order to avoid allocation contention. This will reserve
+         * at most 10 SB entries, leaving 40 SB entries available for dynamic
+         * allocation.
+         *
+         * The v5 CCP Local Storage Block (LSB) is broken up into 8
+         * memory ranges, each of which can be enabled for access by one
+         * or more queues. Device initialization takes this into account,
+         * and attempts to assign one region for exclusive use by each
+         * available queue; the rest are then aggregated as "public" use.
+         * If there are fewer regions than queues, all regions are shared
+         * amongst all queues.
          */
         struct mutex sb_mutex ____cacheline_aligned;
         DECLARE_BITMAP(sb, KSB_COUNT);
@@ -461,4 +446,31 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
 int ccp_dmaengine_register(struct ccp_device *ccp);
 void ccp_dmaengine_unregister(struct ccp_device *ccp);
 
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+        int (*aes)(struct ccp_op *);
+        int (*xts_aes)(struct ccp_op *);
+        int (*sha)(struct ccp_op *);
+        int (*rsa)(struct ccp_op *);
+        int (*passthru)(struct ccp_op *);
+        int (*ecc)(struct ccp_op *);
+        u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
+        void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
+                       unsigned int);
+        int (*init)(struct ccp_device *);
+        void (*destroy)(struct ccp_device *);
+        irqreturn_t (*irqhandler)(int, void *);
+};
+
+/* Structure to hold CCP version-specific values */
+struct ccp_vdata {
+        unsigned int version;
+        int (*init)(struct ccp_device *);
+        const struct ccp_actions *perform;
+        const unsigned int bar;
+        const unsigned int offset;
+};
+
+extern struct ccp_vdata ccpv3;
+
 #endif
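
Moving struct ccp_actions and struct ccp_vdata below the queue and device definitions, and adding the sballoc/sbfree callbacks, makes storage-block management a property of the hardware version rather than of the common code. As a purely hypothetical illustration (none of the ccp5_* symbols below exist in this patch, and the sketch assumes the declarations from ccp-dev.h above), a later v5 backend with an LSB-based allocator could plug in like this:

/* Hypothetical sketch only: how a v5 backend could supply its own
 * storage-block allocator through the callbacks added in this patch.
 * The ccp5_* names are invented for illustration; bodies are stubs. */
static u32 ccp5_alloc_lsb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        /* ... reserve 'count' entries in the queue's LSB region ... */
        return 0;
}

static void ccp5_free_lsb(struct ccp_cmd_queue *cmd_q, unsigned int start,
                          unsigned int count)
{
        /* ... release the entries reserved by ccp5_alloc_lsb() ... */
}

static const struct ccp_actions ccp5_actions = {
        /* ... per-operation callbacks would go here ... */
        .sballoc = ccp5_alloc_lsb,
        .sbfree = ccp5_free_lsb,
};

struct ccp_vdata ccpv5 = {
        .version = 5,                   /* illustrative value */
        .perform = &ccp5_actions,
};

The common code would not change for such a backend: it keeps dispatching through cmd_q->ccp->vdata->perform->sballoc()/sbfree(), as the ccp-ops.c hunks below show.
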
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 2c2890a4c2e2..bd9eb1d4512a 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -41,53 +41,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
         cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
 };
 
-static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
-{
-        int start;
-
-        for (;;) {
-                mutex_lock(&ccp->sb_mutex);
-
-                start = (u32)bitmap_find_next_zero_area(ccp->sb,
-                                                        ccp->sb_count,
-                                                        ccp->sb_start,
-                                                        count, 0);
-                if (start <= ccp->sb_count) {
-                        bitmap_set(ccp->sb, start, count);
-
-                        mutex_unlock(&ccp->sb_mutex);
-                        break;
-                }
-
-                ccp->sb_avail = 0;
-
-                mutex_unlock(&ccp->sb_mutex);
-
-                /* Wait for KSB entries to become available */
-                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
-                        return 0;
-        }
-
-        return KSB_START + start;
-}
-
-static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
-                         unsigned int count)
-{
-        if (!start)
-                return;
-
-        mutex_lock(&ccp->sb_mutex);
-
-        bitmap_clear(ccp->sb, start - KSB_START, count);
-
-        ccp->sb_avail = 1;
-
-        mutex_unlock(&ccp->sb_mutex);
-
-        wake_up_interruptible_all(&ccp->sb_queue);
-}
-
 static u32 ccp_gen_jobid(struct ccp_device *ccp)
 {
         return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
@@ -1214,7 +1167,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
         memset(&op, 0, sizeof(op));
         op.cmd_q = cmd_q;
         op.jobid = ccp_gen_jobid(cmd_q->ccp);
-        op.sb_key = ccp_alloc_ksb(cmd_q->ccp, sb_count);
+        op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
+
         if (!op.sb_key)
                 return -EIO;
 
@@ -1293,7 +1247,7 @@ e_exp:
         ccp_dm_free(&exp);
 
 e_sb:
-        ccp_free_ksb(cmd_q->ccp, op.sb_key, sb_count);
+        cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
 
         return ret;
 }
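
With the open-coded helpers gone from ccp-ops.c, every storage-block reservation in the common code follows the same shape: look up the version data, allocate through sballoc, bail out with -EIO on failure, and release through sbfree once the operation is done. The sketch below condenses that pattern; example_use_sb() and its placeholder body are illustrative rather than code from the patch, and on a v3 device the two callbacks resolve to the ccp_alloc_ksb()/ccp_free_ksb() pair added in ccp-dev-v3.c above.

/* Illustrative only: the alloc/use/free pattern that ccp_run_rsa_cmd()
 * now follows via the version-specific action table. */
static int example_use_sb(struct ccp_cmd_queue *cmd_q, unsigned int sb_count)
{
        const struct ccp_actions *perform = cmd_q->ccp->vdata->perform;
        u32 sb_key;

        sb_key = perform->sballoc(cmd_q, sb_count);     /* v3: ccp_alloc_ksb() */
        if (!sb_key)
                return -EIO;                            /* interrupted or unavailable */

        /* ... program the operation using the reserved SB entries ... */

        perform->sbfree(cmd_q, sb_key, sb_count);       /* v3: ccp_free_ksb() */
        return 0;
}
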