author		Tom Lendacky <thomas.lendacky@amd.com>		2015-02-03 14:07:05 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2015-02-27 04:48:47 -0500
commit		8db8846754767bc955eaf7e28db8a94787d12ce6 (patch)
tree		2ddbaa25a8e6213f05e7d8cd0bfbbc6ddf62307a
parent		2ecc1e95ec70923e642fa481ee7f7ad443798f2a (diff)
crypto: ccp - Updates for checkpatch warnings/errors
Changes to address warnings and errors reported by the checkpatch
script.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
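For reference, the class of warnings addressed here can be reproduced with the kernel's checkpatch script; a minimal example invocation, assuming it is run from the top of a kernel source tree (any file touched by this patch can be substituted):

    ./scripts/checkpatch.pl -f drivers/crypto/ccp/ccp-ops.c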
 drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 12
 drivers/crypto/ccp/ccp-crypto-aes-xts.c  |  4
 drivers/crypto/ccp/ccp-crypto-aes.c      |  3
 drivers/crypto/ccp/ccp-crypto-main.c     |  5
 drivers/crypto/ccp/ccp-crypto-sha.c      | 12
 drivers/crypto/ccp/ccp-crypto.h          |  3
 drivers/crypto/ccp/ccp-dev.c             |  5
 drivers/crypto/ccp/ccp-dev.h             | 12
 drivers/crypto/ccp/ccp-ops.c             | 24
 drivers/crypto/ccp/ccp-pci.c             |  2
 drivers/crypto/ccp/ccp-platform.c        |  1
 11 files changed, 36 insertions(+), 47 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 8e162ad82085..ea7e8446956a 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -23,7 +23,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 				 int ret)
 {
@@ -38,11 +37,13 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 	if (rctx->hash_rem) {
 		/* Save remaining data to buffer */
 		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
 		scatterwalk_map_and_copy(rctx->buf, rctx->src,
 					 offset, rctx->hash_rem, 0);
 		rctx->buf_count = rctx->hash_rem;
-	} else
+	} else {
 		rctx->buf_count = 0;
+	}
 
 	/* Update result area if supplied */
 	if (req->result)
@@ -202,7 +203,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
 }
 
 static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
-			unsigned int key_len)
+			       unsigned int key_len)
 {
 	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
 	struct ccp_crypto_ahash_alg *alg =
@@ -292,7 +293,8 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
 	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
 
 	cipher_tfm = crypto_alloc_cipher("aes", 0,
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+					 CRYPTO_ALG_ASYNC |
+					 CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(cipher_tfm)) {
 		pr_warn("could not load aes cipher driver\n");
 		return PTR_ERR(cipher_tfm);
@@ -354,7 +356,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
 	ret = crypto_register_ahash(alg);
 	if (ret) {
 		pr_err("%s ahash algorithm registration error (%d)\n",
-			base->cra_name, ret);
+		       base->cra_name, ret);
 		kfree(ccp_alg);
 		return ret;
 	}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0cc5594b7de3..52c7395cb8d8 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -21,7 +21,6 @@
 
 #include "ccp-crypto.h"
 
-
 struct ccp_aes_xts_def {
 	const char *name;
 	const char *drv_name;
@@ -216,7 +215,6 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
 	ctx->u.aes.tfm_ablkcipher = NULL;
 }
 
-
 static int ccp_register_aes_xts_alg(struct list_head *head,
 				    const struct ccp_aes_xts_def *def)
 {
@@ -255,7 +253,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
 	ret = crypto_register_alg(alg);
 	if (ret) {
 		pr_err("%s ablkcipher algorithm registration error (%d)\n",
-			alg->cra_name, ret);
+		       alg->cra_name, ret);
 		kfree(ccp_alg);
 		return ret;
 	}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index e46490db0f63..7984f910884d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -22,7 +22,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
 {
 	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
@@ -345,7 +344,7 @@ static int ccp_register_aes_alg(struct list_head *head,
 	ret = crypto_register_alg(alg);
 	if (ret) {
 		pr_err("%s ablkcipher algorithm registration error (%d)\n",
-			alg->cra_name, ret);
+		       alg->cra_name, ret);
 		kfree(ccp_alg);
 		return ret;
 	}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 4d4e016d755b..bdec01ec608f 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -33,7 +33,6 @@ static unsigned int sha_disable;
 module_param(sha_disable, uint, 0444);
 MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
 
-
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
@@ -48,6 +47,7 @@ struct ccp_crypto_queue {
 	struct list_head *backlog;
 	unsigned int cmd_count;
 };
+
 #define CCP_CRYPTO_MAX_QLEN	100
 
 static struct ccp_crypto_queue req_queue;
@@ -77,7 +77,6 @@ struct ccp_crypto_cpu {
 	int err;
 };
 
-
 static inline bool ccp_crypto_success(int err)
 {
 	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
@@ -143,7 +142,7 @@ static void ccp_crypto_complete(void *data, int err)
 	int ret;
 
 	if (err == -EINPROGRESS) {
-		/* Only propogate the -EINPROGRESS if necessary */
+		/* Only propagate the -EINPROGRESS if necessary */
 		if (crypto_cmd->ret == -EBUSY) {
 			crypto_cmd->ret = -EINPROGRESS;
 			req->complete(req, -EINPROGRESS);
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 96531571f7cf..507b34e0cc19 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -23,7 +23,6 @@
 
 #include "ccp-crypto.h"
 
-
 static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 {
 	struct ahash_request *req = ahash_request_cast(async_req);
@@ -37,11 +36,13 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 	if (rctx->hash_rem) {
 		/* Save remaining data to buffer */
 		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
 		scatterwalk_map_and_copy(rctx->buf, rctx->src,
 					 offset, rctx->hash_rem, 0);
 		rctx->buf_count = rctx->hash_rem;
-	} else
+	} else {
 		rctx->buf_count = 0;
+	}
 
 	/* Update result area if supplied */
 	if (req->result)
@@ -227,8 +228,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
 		}
 
 		key_len = digest_size;
-	} else
+	} else {
 		memcpy(ctx->u.sha.key, key, key_len);
+	}
 
 	for (i = 0; i < block_size; i++) {
 		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
@@ -355,7 +357,7 @@ static int ccp_register_hmac_alg(struct list_head *head,
 	ret = crypto_register_ahash(alg);
 	if (ret) {
 		pr_err("%s ahash algorithm registration error (%d)\n",
-			base->cra_name, ret);
+		       base->cra_name, ret);
 		kfree(ccp_alg);
 		return ret;
 	}
@@ -410,7 +412,7 @@ static int ccp_register_sha_alg(struct list_head *head,
 	ret = crypto_register_ahash(alg);
 	if (ret) {
 		pr_err("%s ahash algorithm registration error (%d)\n",
-			base->cra_name, ret);
+		       base->cra_name, ret);
 		kfree(ccp_alg);
 		return ret;
 	}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 9aa4ae184f7f..76a96f0f44c6 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -13,7 +13,6 @@
 #ifndef __CCP_CRYPTO_H__
 #define __CCP_CRYPTO_H__
 
-
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/pci.h>
@@ -25,7 +24,6 @@
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 
-
 #define CCP_CRA_PRIORITY	300
 
 struct ccp_crypto_ablkcipher_alg {
@@ -68,7 +66,6 @@ static inline struct ccp_crypto_ahash_alg *
 	return container_of(ahash_alg, struct ccp_crypto_ahash_alg, alg);
 }
 
-
 /***** AES related defines *****/
 struct ccp_aes_ctx {
 	/* Fallback cipher for XTS with unsupported unit sizes */
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index ca29c120b85f..68c637af2c42 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -37,7 +37,6 @@ struct ccp_tasklet_data {
 	struct ccp_cmd *cmd;
 };
 
-
 static struct ccp_device *ccp_dev;
 static inline struct ccp_device *ccp_get_device(void)
 {
@@ -297,10 +296,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 	struct ccp_device *ccp;
 
 	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
-	if (ccp == NULL) {
-		dev_err(dev, "unable to allocate device struct\n");
+	if (!ccp)
 		return NULL;
-	}
 	ccp->dev = dev;
 
 	INIT_LIST_HEAD(&ccp->cmd);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 62ff35a6b9ec..6ff89031fb96 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -21,7 +21,7 @@
 #include <linux/wait.h>
 #include <linux/dmapool.h>
 #include <linux/hw_random.h>
-
+#include <linux/bitops.h>
 
 #define MAX_DMAPOOL_NAME_LEN	32
 
@@ -33,7 +33,6 @@
 #define CACHE_NONE		0x00
 #define CACHE_WB_NO_ALLOC	0xb7
 
-
 /****** Register Mappings ******/
 #define Q_MASK_REG		0x000
 #define TRNG_OUT_REG		0x00c
@@ -54,8 +53,8 @@
 #define CMD_Q_CACHE_BASE	0x228
 #define CMD_Q_CACHE_INC		0x20
 
-#define CMD_Q_ERROR(__qs)	((__qs) & 0x0000003f);
-#define CMD_Q_DEPTH(__qs)	(((__qs) >> 12) & 0x0000000f);
+#define CMD_Q_ERROR(__qs)	((__qs) & 0x0000003f)
+#define CMD_Q_DEPTH(__qs)	(((__qs) >> 12) & 0x0000000f)
 
 /****** REQ0 Related Values ******/
 #define REQ0_WAIT_FOR_WRITE	0x00000004
@@ -103,7 +102,6 @@
 /****** REQ6 Related Values ******/
 #define REQ6_MEMTYPE_SHIFT	16
 
-
 /****** Key Storage Block ******/
 #define KSB_START		77
 #define KSB_END			127
@@ -114,7 +112,7 @@
 #define CCP_JOBID_MASK		0x0000003f
 
 #define CCP_DMAPOOL_MAX_SIZE	64
-#define CCP_DMAPOOL_ALIGN	(1 << 5)
+#define CCP_DMAPOOL_ALIGN	BIT(5)
 
 #define CCP_REVERSE_BUF_SIZE	64
 
@@ -142,7 +140,6 @@
 #define CCP_ECC_RESULT_OFFSET	60
 #define CCP_ECC_RESULT_SUCCESS	0x0001
 
-
 struct ccp_device;
 struct ccp_cmd;
 
@@ -261,7 +258,6 @@ struct ccp_device {
 	unsigned int axcache;
 };
 
-
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 8729364261d7..71f2e3c89424 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -27,7 +27,6 @@
 
 #include "ccp-dev.h"
 
-
 enum ccp_memtype {
 	CCP_MEMTYPE_SYSTEM = 0,
 	CCP_MEMTYPE_KSB,
@@ -515,7 +514,6 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
 	if (!wa->dma_count)
 		return -ENOMEM;
 
-
 	return 0;
 }
 
@@ -763,8 +761,9 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
 		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
 		op_len = min(sg_src_len, sg_dst_len);
-	} else
+	} else {
 		op_len = sg_src_len;
+	}
 
 	/* The data operation length will be at least block_size in length
 	 * or the smaller of available sg room remaining for the source or
@@ -1131,9 +1130,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (ret)
 		goto e_ctx;
 
-	if (in_place)
+	if (in_place) {
 		dst = src;
-	else {
+	} else {
 		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
 				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
 		if (ret)
@@ -1304,9 +1303,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	if (ret)
 		goto e_ctx;
 
-	if (in_place)
+	if (in_place) {
 		dst = src;
-	else {
+	} else {
 		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
 				    unit_size, DMA_FROM_DEVICE);
 		if (ret)
@@ -1451,8 +1450,9 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			goto e_ctx;
 		}
 		memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
-	} else
+	} else {
 		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+	}
 
 	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
 			      CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1732,9 +1732,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 	if (ret)
 		goto e_mask;
 
-	if (in_place)
+	if (in_place) {
 		dst = src;
-	else {
+	} else {
 		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
 				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
 		if (ret)
@@ -1974,7 +1974,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	/* Set the first point Z coordianate to 1 */
-	*(src.address) = 0x01;
+	*src.address = 0x01;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
@@ -1989,7 +1989,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		src.address += CCP_ECC_OPERAND_SIZE;
 
 		/* Set the second point Z coordianate to 1 */
-		*(src.address) = 0x01;
+		*src.address = 0x01;
 		src.address += CCP_ECC_OPERAND_SIZE;
 	} else {
 		/* Copy the Domain "a" parameter */
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 7f89c946adfe..1980f77c29ef 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -204,7 +204,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ret = -EIO;
 	ccp->io_map = pci_iomap(pdev, bar, 0);
-	if (ccp->io_map == NULL) {
+	if (!ccp->io_map) {
 		dev_err(dev, "pci_iomap failed\n");
 		goto e_device;
 	}
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 8c50bad25f7e..9e09c5023b5f 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -26,7 +26,6 @@
 
 #include "ccp-dev.h"
 
-
 static int ccp_get_irq(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;