author		Gary R Hook <gary.hook@amd.com>	2016-07-26 20:09:40 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-08-09 06:47:08 -0400
commit		956ee21a6df08afd9c1c64e0f394a9a1b65e897d (patch)
tree		1b6055656feda6842212c252506e42b4e803bdb9
parent		a43eb98507574acfc435c38a6b7fb1fab6605519 (diff)
crypto: ccp - Refactoring: symbol cleanup
Form and use of the local storage block in the CCP is particular to the device version. Much of the code that accesses the storage block can treat it as a virtual resource and will undergo some renaming. Device-specific access to the memory will be moved into the device file. Service functions will be added to the actions structure.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
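The direction described here is toward hiding version-specific storage-block handling behind the per-version dispatch table (struct ccp_actions, reached as cmd_q->ccp->vdata->perform in this driver). A rough sketch of where that leads — the service-function members below are hypothetical at this point in the series; they are added by later patches, not by this one:

	/* Sketch only: storage-block service functions added alongside the
	 * existing per-version computation callbacks. The sballoc/sbfree
	 * names are illustrative, not part of this patch. */
	struct ccp_actions {
		int (*aes)(struct ccp_op *op);
		int (*xts_aes)(struct ccp_op *op);
		int (*sha)(struct ccp_op *op);
		int (*rsa)(struct ccp_op *op);
		int (*passthru)(struct ccp_op *op);
		int (*ecc)(struct ccp_op *op);
		/* proposed: device-version-specific local storage access */
		u32 (*sballoc)(struct ccp_cmd_queue *cmd_q, unsigned int count);
		void (*sbfree)(struct ccp_cmd_queue *cmd_q, unsigned int start,
			       unsigned int count);
	};

Callers would then reach the storage block through the dispatch table instead of manipulating KSB state directly.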
-rw-r--r--	drivers/crypto/ccp/ccp-dev-v3.c	32
-rw-r--r--	drivers/crypto/ccp/ccp-dev.c	7
-rw-r--r--	drivers/crypto/ccp/ccp-dev.h	43
-rw-r--r--	drivers/crypto/ccp/ccp-ops.c	266
4 files changed, 175 insertions(+), 173 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 1a94d2ea4ff1..19eafb85708f 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -100,10 +100,10 @@ static int ccp_perform_aes(struct ccp_op *op)
 		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
 		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
 		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
 	cr[1] = op->src.u.dma.length - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -130,10 +130,10 @@ static int ccp_perform_xts_aes(struct ccp_op *op)
 	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
 		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
 		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
 	cr[1] = op->src.u.dma.length - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -159,7 +159,7 @@ static int ccp_perform_sha(struct ccp_op *op)
 		| REQ1_INIT;
 	cr[1] = op->src.u.dma.length - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 
@@ -182,11 +182,11 @@ static int ccp_perform_rsa(struct ccp_op *op)
 	/* Fill out the register contents for REQ1 through REQ6 */
 	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
 		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
-		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
 		| REQ1_EOM;
 	cr[1] = op->u.rsa.input_len - 1;
 	cr[2] = ccp_addr_lo(&op->src.u.dma);
-	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
 		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
 		| ccp_addr_hi(&op->src.u.dma);
 	cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -216,10 +216,10 @@ static int ccp_perform_passthru(struct ccp_op *op)
 			| ccp_addr_hi(&op->src.u.dma);
 
 		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
-			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
 	} else {
-		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
-		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+		cr[2] = op->src.u.sb * CCP_SB_BYTES;
+		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
 	}
 
 	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
@@ -227,8 +227,8 @@ static int ccp_perform_passthru(struct ccp_op *op)
 		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
 			| ccp_addr_hi(&op->dst.u.dma);
 	} else {
-		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
-		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
+		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
 	}
 
 	if (op->eom)
@@ -322,9 +322,9 @@ static int ccp_init(struct ccp_device *ccp)
 		cmd_q->dma_pool = dma_pool;
 
 		/* Reserve 2 KSB regions for the queue */
-		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
-		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
-		ccp->ksb_count -= 2;
+		cmd_q->sb_key = KSB_START + ccp->sb_start++;
+		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
+		ccp->sb_count -= 2;
 
 		/* Preset some register values and masks that are queue
 		 * number dependent
@@ -376,7 +376,7 @@ static int ccp_init(struct ccp_device *ccp)
 	}
 
 	/* Initialize the queues used to wait for KSB space and suspend */
-	init_waitqueue_head(&ccp->ksb_queue);
+	init_waitqueue_head(&ccp->sb_queue);
 	init_waitqueue_head(&ccp->suspend_queue);
 
 	/* Create a kthread for each queue */
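In the ccp_perform_passthru() hunk above, a storage-block reference is just a slot index that the v3 hardware consumes as a byte offset. A minimal helper capturing that convention (illustrative only; the patch open-codes the multiplication) might read:

	/* Hypothetical helper: a storage-block slot index becomes the byte
	 * offset the v3 command registers expect (CCP_SB_BYTES == 32). */
	static inline u32 ccp_sb_slot_to_offset(u32 sb_slot)
	{
		return sb_slot * CCP_SB_BYTES;
	}

This is exactly the op->src.u.sb * CCP_SB_BYTES arithmetic used in the hunk.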
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 87b9f2bfa623..9c8cfbb6841f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -397,9 +398,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 
 	spin_lock_init(&ccp->cmd_lock);
 	mutex_init(&ccp->req_mutex);
-	mutex_init(&ccp->ksb_mutex);
-	ccp->ksb_count = KSB_COUNT;
-	ccp->ksb_start = 0;
+	mutex_init(&ccp->sb_mutex);
+	ccp->sb_count = KSB_COUNT;
+	ccp->sb_start = 0;
 
 	ccp->ord = ccp_increment_unit_ordinal();
 	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 46d3ef30c6e9..1e30568d7c04 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -111,8 +111,7 @@
 #define KSB_START			77
 #define KSB_END				127
 #define KSB_COUNT			(KSB_END - KSB_START + 1)
-#define CCP_KSB_BITS			256
-#define CCP_KSB_BYTES			32
+#define CCP_SB_BITS			256
 
 #define CCP_JOBID_MASK			0x0000003f
 
@@ -121,19 +120,19 @@
 
 #define CCP_REVERSE_BUF_SIZE		64
 
-#define CCP_AES_KEY_KSB_COUNT		1
-#define CCP_AES_CTX_KSB_COUNT		1
+#define CCP_AES_KEY_SB_COUNT		1
+#define CCP_AES_CTX_SB_COUNT		1
 
-#define CCP_XTS_AES_KEY_KSB_COUNT	1
-#define CCP_XTS_AES_CTX_KSB_COUNT	1
+#define CCP_XTS_AES_KEY_SB_COUNT	1
+#define CCP_XTS_AES_CTX_SB_COUNT	1
 
-#define CCP_SHA_KSB_COUNT		1
+#define CCP_SHA_SB_COUNT		1
 
 #define CCP_RSA_MAX_WIDTH		4096
 
 #define CCP_PASSTHRU_BLOCKSIZE		256
 #define CCP_PASSTHRU_MASKSIZE		32
-#define CCP_PASSTHRU_KSB_COUNT		1
+#define CCP_PASSTHRU_SB_COUNT		1
 
 #define CCP_ECC_MODULUS_BYTES		48	/* 384-bits */
 #define CCP_ECC_MAX_OPERANDS		6
@@ -145,6 +144,8 @@
 #define CCP_ECC_RESULT_OFFSET		60
 #define CCP_ECC_RESULT_SUCCESS		0x0001
 
+#define CCP_SB_BYTES			32
+
 struct ccp_op;
 
 /* Structure for computation functions that are device-specific */
@@ -215,9 +216,9 @@ struct ccp_cmd_queue {
 	/* Queue dma pool */
 	struct dma_pool *dma_pool;
 
-	/* Queue reserved KSB regions */
-	u32 ksb_key;
-	u32 ksb_ctx;
+	/* Per-queue reserved storage block(s) */
+	u32 sb_key;
+	u32 sb_ctx;
 
 	/* Queue processing thread */
 	struct task_struct *kthread;
@@ -313,12 +314,12 @@ struct ccp_device {
 	 * to avoid allocation contention. This will reserve at most 10 KSB
 	 * entries, leaving 40 KSB entries available for dynamic allocation.
 	 */
-	struct mutex ksb_mutex ____cacheline_aligned;
-	DECLARE_BITMAP(ksb, KSB_COUNT);
-	wait_queue_head_t ksb_queue;
-	unsigned int ksb_avail;
-	unsigned int ksb_count;
-	u32 ksb_start;
+	struct mutex sb_mutex ____cacheline_aligned;
+	DECLARE_BITMAP(sb, KSB_COUNT);
+	wait_queue_head_t sb_queue;
+	unsigned int sb_avail;
+	unsigned int sb_count;
+	u32 sb_start;
 
 	/* Suspend support */
 	unsigned int suspending;
@@ -330,7 +331,7 @@ struct ccp_device {
 
 enum ccp_memtype {
 	CCP_MEMTYPE_SYSTEM = 0,
-	CCP_MEMTYPE_KSB,
+	CCP_MEMTYPE_SB,
 	CCP_MEMTYPE_LOCAL,
 	CCP_MEMTYPE__LAST,
 };
@@ -374,7 +375,7 @@ struct ccp_mem {
 	enum ccp_memtype type;
 	union {
 		struct ccp_dma_info dma;
-		u32 ksb;
+		u32 sb;
 	} u;
 };
 
@@ -414,8 +415,8 @@ struct ccp_op {
 	u32 jobid;
 	u32 ioc;
 	u32 soc;
-	u32 ksb_key;
-	u32 ksb_ctx;
+	u32 sb_key;
+	u32 sb_ctx;
 	u32 init;
 	u32 eom;
 
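With the renamed CCP_MEMTYPE_SB and the sb member of the ccp_mem union, an operation's source or destination is addressed either by system DMA or by storage-block slot. A condensed sketch of the dispatch this implies, mirroring the v3 passthru logic shown earlier (illustrative, not code from this patch):

	/* Illustrative only: memtype selects how the low address word is
	 * formed for a ccp_op source. */
	switch (op->src.type) {
	case CCP_MEMTYPE_SYSTEM:
		cr2 = ccp_addr_lo(&op->src.u.dma);	/* DMA address */
		break;
	case CCP_MEMTYPE_SB:
		cr2 = op->src.u.sb * CCP_SB_BYTES;	/* slot offset */
		break;
	default:
		return -EINVAL;
	}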
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d1024771e926..2c2890a4c2e2 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -46,25 +46,25 @@ static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
 	int start;
 
 	for (;;) {
-		mutex_lock(&ccp->ksb_mutex);
+		mutex_lock(&ccp->sb_mutex);
 
-		start = (u32)bitmap_find_next_zero_area(ccp->ksb,
-							ccp->ksb_count,
-							ccp->ksb_start,
+		start = (u32)bitmap_find_next_zero_area(ccp->sb,
+							ccp->sb_count,
+							ccp->sb_start,
 							count, 0);
-		if (start <= ccp->ksb_count) {
-			bitmap_set(ccp->ksb, start, count);
+		if (start <= ccp->sb_count) {
+			bitmap_set(ccp->sb, start, count);
 
-			mutex_unlock(&ccp->ksb_mutex);
+			mutex_unlock(&ccp->sb_mutex);
 			break;
 		}
 
-		ccp->ksb_avail = 0;
+		ccp->sb_avail = 0;
 
-		mutex_unlock(&ccp->ksb_mutex);
+		mutex_unlock(&ccp->sb_mutex);
 
 		/* Wait for KSB entries to become available */
-		if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
+		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
 			return 0;
 	}
 
@@ -77,15 +77,15 @@ static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
 	if (!start)
 		return;
 
-	mutex_lock(&ccp->ksb_mutex);
+	mutex_lock(&ccp->sb_mutex);
 
-	bitmap_clear(ccp->ksb, start - KSB_START, count);
+	bitmap_clear(ccp->sb, start - KSB_START, count);
 
-	ccp->ksb_avail = 1;
+	ccp->sb_avail = 1;
 
-	mutex_unlock(&ccp->ksb_mutex);
+	mutex_unlock(&ccp->sb_mutex);
 
-	wake_up_interruptible_all(&ccp->ksb_queue);
+	wake_up_interruptible_all(&ccp->sb_queue);
 }
 
 static u32 ccp_gen_jobid(struct ccp_device *ccp)
@@ -232,7 +232,7 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
 				   unsigned int len, unsigned int se_len,
 				   bool sign_extend)
 {
-	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
 	u8 buffer[CCP_REVERSE_BUF_SIZE];
 
 	if (WARN_ON(se_len > sizeof(buffer)))
@@ -242,21 +242,21 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
 	dm_offset = 0;
 	nbytes = len;
 	while (nbytes) {
-		ksb_len = min_t(unsigned int, nbytes, se_len);
-		sg_offset -= ksb_len;
+		sb_len = min_t(unsigned int, nbytes, se_len);
+		sg_offset -= sb_len;
 
-		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
-		for (i = 0; i < ksb_len; i++)
-			wa->address[dm_offset + i] = buffer[ksb_len - i - 1];
+		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
+		for (i = 0; i < sb_len; i++)
+			wa->address[dm_offset + i] = buffer[sb_len - i - 1];
 
-		dm_offset += ksb_len;
-		nbytes -= ksb_len;
+		dm_offset += sb_len;
+		nbytes -= sb_len;
 
-		if ((ksb_len != se_len) && sign_extend) {
+		if ((sb_len != se_len) && sign_extend) {
 			/* Must sign-extend to nearest sign-extend length */
 			if (wa->address[dm_offset - 1] & 0x80)
 				memset(wa->address + dm_offset, 0xff,
-				       se_len - ksb_len);
+				       se_len - sb_len);
 		}
 	}
 
@@ -267,22 +267,22 @@ static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
 				    struct scatterlist *sg,
 				    unsigned int len)
 {
-	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
 	u8 buffer[CCP_REVERSE_BUF_SIZE];
 
 	sg_offset = 0;
 	dm_offset = len;
 	nbytes = len;
 	while (nbytes) {
-		ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
-		dm_offset -= ksb_len;
+		sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
+		dm_offset -= sb_len;
 
-		for (i = 0; i < ksb_len; i++)
-			buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
-		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);
+		for (i = 0; i < sb_len; i++)
+			buffer[sb_len - i - 1] = wa->address[dm_offset + i];
+		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);
 
-		sg_offset += ksb_len;
-		nbytes -= ksb_len;
+		sg_offset += sb_len;
+		nbytes -= sb_len;
 	}
 }
 
@@ -450,9 +450,9 @@ static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
 	}
 }
 
-static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
-				struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
 				u32 byte_swap, bool from)
 {
 	struct ccp_op op;
 
@@ -464,8 +464,8 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
 
 	if (from) {
 		op.soc = 1;
-		op.src.type = CCP_MEMTYPE_KSB;
-		op.src.u.ksb = ksb;
+		op.src.type = CCP_MEMTYPE_SB;
+		op.src.u.sb = sb;
 		op.dst.type = CCP_MEMTYPE_SYSTEM;
 		op.dst.u.dma.address = wa->dma.address;
 		op.dst.u.dma.length = wa->length;
@@ -473,8 +473,8 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
 		op.src.type = CCP_MEMTYPE_SYSTEM;
 		op.src.u.dma.address = wa->dma.address;
 		op.src.u.dma.length = wa->length;
-		op.dst.type = CCP_MEMTYPE_KSB;
-		op.dst.u.ksb = ksb;
+		op.dst.type = CCP_MEMTYPE_SB;
+		op.dst.u.sb = sb;
 	}
 
 	op.u.passthru.byte_swap = byte_swap;
@@ -482,18 +482,18 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
 	return cmd_q->ccp->vdata->perform->passthru(&op);
 }
 
-static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
-			   struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
+			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
 			   u32 byte_swap)
 {
-	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
+	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
 }
 
-static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
-			     struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
+static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
+			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
 			     u32 byte_swap)
 {
-	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
+	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
 }
 
 static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
@@ -528,54 +528,54 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
-	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
 
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
 	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = cmd_q->ksb_key;
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
 	op.u.aes.type = aes->type;
 	op.u.aes.mode = aes->mode;
 	op.u.aes.action = aes->action;
 
-	/* All supported key sizes fit in a single (32-byte) KSB entry
+	/* All supported key sizes fit in a single (32-byte) SB entry
 	 * and must be in little endian format. Use the 256-bit byte
 	 * swap passthru option to convert from big endian to little
 	 * endian.
 	 */
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_KSB_BYTES - aes->key_len;
+	dm_offset = CCP_SB_BYTES - aes->key_len;
 	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_key;
 	}
 
-	/* The AES context fits in a single (32-byte) KSB entry and
+	/* The AES context fits in a single (32-byte) SB entry and
 	 * must be in little endian format. Use the 256-bit byte swap
 	 * passthru option to convert from big endian to little endian.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
@@ -593,9 +593,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 	op.eom = 1;
 
 	/* Push the K1/K2 key to the CCP now */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
-				op.ksb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
+			       op.sb_ctx,
 				CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_src;
@@ -603,8 +603,8 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 
 	ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
 			aes->cmac_key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_src;
@@ -623,15 +623,15 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 	/* Retrieve the AES context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_src;
 	}
 
 	/* ...but we only need AES_BLOCK_SIZE bytes */
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
 
 e_src:
@@ -681,56 +681,56 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
-	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
 
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
 	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = cmd_q->ksb_key;
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
 	op.u.aes.type = aes->type;
 	op.u.aes.mode = aes->mode;
 	op.u.aes.action = aes->action;
 
-	/* All supported key sizes fit in a single (32-byte) KSB entry
+	/* All supported key sizes fit in a single (32-byte) SB entry
 	 * and must be in little endian format. Use the 256-bit byte
 	 * swap passthru option to convert from big endian to little
 	 * endian.
 	 */
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_KSB_BYTES - aes->key_len;
+	dm_offset = CCP_SB_BYTES - aes->key_len;
 	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_key;
 	}
 
-	/* The AES context fits in a single (32-byte) KSB entry and
+	/* The AES context fits in a single (32-byte) SB entry and
 	 * must be in little endian format. Use the 256-bit byte swap
 	 * passthru option to convert from big endian to little endian.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	if (aes->mode != CCP_AES_MODE_ECB) {
 		/* Load the AES context - conver to LE */
-		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
-		ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				      CCP_PASSTHRU_BYTESWAP_256BIT);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_ctx;
@@ -786,15 +786,15 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	/* Retrieve the AES context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
 
 	/* ...but we only need AES_BLOCK_SIZE bytes */
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	}
 
@@ -858,53 +858,53 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
 		return -EINVAL;
 
-	BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
-	BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
+	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);
 
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
 	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = cmd_q->ksb_key;
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.sb_key = cmd_q->sb_key;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
 	op.u.xts.action = xts->action;
 	op.u.xts.unit_size = xts->unit_size;
 
-	/* All supported key sizes fit in a single (32-byte) KSB entry
+	/* All supported key sizes fit in a single (32-byte) SB entry
 	 * and must be in little endian format. Use the 256-bit byte
 	 * swap passthru option to convert from big endian to little
 	 * endian.
 	 */
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
+	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
 	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
 	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
-	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_key;
 	}
 
-	/* The AES context fits in a single (32-byte) KSB entry and
+	/* The AES context fits in a single (32-byte) SB entry and
 	 * for XTS is already in little endian format so no byte swapping
 	 * is needed.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			      CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
@@ -950,15 +950,15 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	/* Retrieve the AES context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
 
 	/* ...but we only need AES_BLOCK_SIZE bytes */
-	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
 
 e_dst:
@@ -1036,21 +1036,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (!sha->src)
 		return -EINVAL;
 
-	BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_SHA_SB_COUNT != 1);
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
 	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_ctx = cmd_q->ksb_ctx;
+	op.sb_ctx = cmd_q->sb_ctx;
 	op.u.sha.type = sha->type;
 	op.u.sha.msg_bits = sha->msg_bits;
 
-	/* The SHA context fits in a single (32-byte) KSB entry and
+	/* The SHA context fits in a single (32-byte) SB entry and
 	 * must be in little endian format. Use the 256-bit byte swap
 	 * passthru option to convert from big endian to little endian.
 	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
+				   CCP_SHA_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		return ret;
@@ -1077,8 +1077,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
 	}
 
-	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
@@ -1107,8 +1107,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	/* Retrieve the SHA context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping to BE
 	 */
-	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_data;
@@ -1191,7 +1191,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct ccp_dm_workarea exp, src;
 	struct ccp_data dst;
 	struct ccp_op op;
-	unsigned int ksb_count, i_len, o_len;
+	unsigned int sb_count, i_len, o_len;
 	int ret;
 
 	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
@@ -1209,16 +1209,16 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	o_len = ((rsa->key_size + 255) / 256) * 32;
 	i_len = o_len * 2;
 
-	ksb_count = o_len / CCP_KSB_BYTES;
+	sb_count = o_len / CCP_SB_BYTES;
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
 	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
-	if (!op.ksb_key)
+	op.sb_key = ccp_alloc_ksb(cmd_q->ccp, sb_count);
+	if (!op.sb_key)
 		return -EIO;
 
-	/* The RSA exponent may span multiple (32-byte) KSB entries and must
+	/* The RSA exponent may span multiple (32-byte) SB entries and must
 	 * be in little endian format. Reverse copy each 32-byte chunk
 	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
 	 * and each byte within that chunk and do not perform any byte swap
@@ -1226,14 +1226,14 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	 */
 	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
 	if (ret)
-		goto e_ksb;
+		goto e_sb;
 
 	ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
-				      CCP_KSB_BYTES, false);
+				      CCP_SB_BYTES, false);
 	if (ret)
 		goto e_exp;
-	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
+	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
 			      CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_exp;
@@ -1248,12 +1248,12 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		goto e_exp;
 
 	ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
-				      CCP_KSB_BYTES, false);
+				      CCP_SB_BYTES, false);
 	if (ret)
 		goto e_src;
 	src.address += o_len;	/* Adjust the address for the copy operation */
 	ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
-				      CCP_KSB_BYTES, false);
+				      CCP_SB_BYTES, false);
 	if (ret)
 		goto e_src;
 	src.address -= o_len;	/* Reset the address to original value */
@@ -1292,8 +1292,8 @@ e_src:
 e_exp:
 	ccp_dm_free(&exp);
 
-e_ksb:
-	ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
+e_sb:
+	ccp_free_ksb(cmd_q->ccp, op.sb_key, sb_count);
 
 	return ret;
 }
@@ -1322,7 +1322,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
@@ -1330,18 +1330,18 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 
 	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
 		/* Load the mask */
-		op.ksb_key = cmd_q->ksb_key;
+		op.sb_key = cmd_q->sb_key;
 
 		ret = ccp_init_dm_workarea(&mask, cmd_q,
-					   CCP_PASSTHRU_KSB_COUNT *
-					   CCP_KSB_BYTES,
+					   CCP_PASSTHRU_SB_COUNT *
+					   CCP_SB_BYTES,
 					   DMA_TO_DEVICE);
 		if (ret)
 			return ret;
 
 		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
-		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
 				      CCP_PASSTHRU_BYTESWAP_NOOP);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
 			goto e_mask;
@@ -1449,7 +1449,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
 		return -EINVAL;
 	}
 
-	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
@@ -1457,13 +1457,13 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
 
 	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
 		/* Load the mask */
-		op.ksb_key = cmd_q->ksb_key;
+		op.sb_key = cmd_q->sb_key;
 
 		mask.length = pt->mask_len;
 		mask.dma.address = pt->mask;
 		mask.dma.length = pt->mask_len;
 
-		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
 				      CCP_PASSTHRU_BYTESWAP_NOOP);
 		if (ret) {
 			cmd->engine_error = cmd_q->cmd_error;
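Taken together, the renamed helpers form a symmetric put/get pair built on the passthru engine. A condensed usage sketch (error handling abbreviated; assumes a ccp_dm_workarea prepared as in the hunks above):

	/* Sketch: stage a 32-byte value into a storage-block slot, then
	 * read it back, using the helpers renamed by this patch. */
	ret = ccp_copy_to_sb(cmd_q, &wa, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);	/* BE -> LE */
	if (!ret)
		ret = ccp_copy_from_sb(cmd_q, &wa, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT); /* LE -> BE */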