 Documentation/padata.txt                                  |  97
 arch/s390/crypto/Makefile                                 |   2
 arch/s390/crypto/crypto_des.h                             |   2
 arch/s390/crypto/des_s390.c                               | 238
 crypto/Kconfig                                            |  15
 crypto/Makefile                                           |   4
 crypto/algboss.c                                          |   4
 crypto/authenc.c                                          |   2
 crypto/ctr.c                                              |   2
 crypto/pcrypt.c                                           | 241
 crypto/testmgr.c                                          |  14
 crypto/twofish_generic.c (renamed from crypto/twofish.c)  |   1
 crypto/xts.c                                              |   2
 drivers/char/hw_random/n2-drv.c                           |   2
 drivers/char/random.c                                     |   2
 drivers/crypto/geode-aes.c                                |   2
 drivers/crypto/hifn_795x.c                                |   4
 drivers/crypto/mv_cesa.c                                  |  10
 drivers/crypto/n2_core.c                                  | 415
 drivers/crypto/omap-sham.c                                |   1
 drivers/crypto/talitos.c                                  |  77
 include/linux/padata.h                                    | 121
 kernel/padata.c                                           | 755
 23 files changed, 1306 insertions(+), 707 deletions(-)
diff --git a/Documentation/padata.txt b/Documentation/padata.txt
index 269d7d0d8335..473ebf22cd69 100644
--- a/Documentation/padata.txt
+++ b/Documentation/padata.txt
@@ -1,5 +1,5 @@
 The padata parallel execution mechanism
-Last updated for 2.6.34
+Last updated for 2.6.36
 
 Padata is a mechanism by which the kernel can farm work out to be done in
 parallel on multiple CPUs while retaining the ordering of tasks. It was
@@ -13,31 +13,86 @@ overall control of how tasks are to be run:
 
     #include <linux/padata.h>
 
-    struct padata_instance *padata_alloc(const struct cpumask *cpumask,
-                                         struct workqueue_struct *wq);
+    struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+                                         const struct cpumask *pcpumask,
+                                         const struct cpumask *cbcpumask);
 
-The cpumask describes which processors will be used to execute work
-submitted to this instance. The workqueue wq is where the work will
-actually be done; it should be a multithreaded queue, naturally.
+The pcpumask describes which processors will be used to execute work
+submitted to this instance in parallel. The cbcpumask defines which
+processors are allowed to be used as serialization callback processors.
+The workqueue wq is where the work will actually be done; it should be
+a multithreaded queue, naturally.
+
+To allocate a padata instance with the cpu_possible_mask for both
+cpumasks, this helper function can be used:
+
+    struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq);
+
+Note: Padata maintains two kinds of cpumasks internally: the user-supplied
+cpumasks, submitted via padata_alloc/padata_alloc_possible, and the 'usable'
+cpumasks. The usable cpumasks are always the subset of active CPUs in the
+user-supplied cpumasks; these are the cpumasks padata actually uses. So
+it is legal to supply a cpumask to padata that contains offline CPUs.
+Once an offline CPU in the user-supplied cpumask comes online, padata
+is going to use it.
 
 There are functions for enabling and disabling the instance:
 
-    void padata_start(struct padata_instance *pinst);
+    int padata_start(struct padata_instance *pinst);
     void padata_stop(struct padata_instance *pinst);
 
-These functions literally do nothing beyond setting or clearing the
-"padata_start() was called" flag; if that flag is not set, other functions
-will refuse to work.
+These functions set or clear the "PADATA_INIT" flag;
+if that flag is not set, other functions will refuse to work.
+padata_start returns zero on success (flag set) or -EINVAL if the
+padata cpumask contains no active CPU (flag not set).
+padata_stop clears the flag and blocks until the padata instance
+is unused.
 
 The list of CPUs to be used can be adjusted with these functions:
 
-    int padata_set_cpumask(struct padata_instance *pinst,
+    int padata_set_cpumasks(struct padata_instance *pinst,
+                            cpumask_var_t pcpumask,
+                            cpumask_var_t cbcpumask);
+    int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                            cpumask_var_t cpumask);
-    int padata_add_cpu(struct padata_instance *pinst, int cpu);
-    int padata_remove_cpu(struct padata_instance *pinst, int cpu);
+    int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
+    int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
+
+Changing the CPU masks is an expensive operation, though, so it should not
+be done with great frequency.
+
+It's possible to change both cpumasks of a padata instance with
+padata_set_cpumasks by specifying the cpumasks for parallel execution (pcpumask)
+and for the serial callback function (cbcpumask). padata_set_cpumask is used to
+change just one of the cpumasks. Here cpumask_type is one of PADATA_CPU_SERIAL,
+PADATA_CPU_PARALLEL and cpumask specifies the new cpumask to use.
+To simply add or remove one CPU from a certain cpumask, the functions
+padata_add_cpu/padata_remove_cpu are used. cpu specifies the CPU to add or
+remove and mask is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL.
+
+If a user is interested in padata cpumask changes, he can register to
+the padata cpumask change notifier:
+
+    int padata_register_cpumask_notifier(struct padata_instance *pinst,
+                                         struct notifier_block *nblock);
+
+To unregister from that notifier:
+
+    int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
+                                           struct notifier_block *nblock);
+
+The padata cpumask change notifier notifies about changes of the usable
+cpumasks, i.e. the subset of active CPUs in the user-supplied cpumask.
+
+Padata calls the notifier chain with:
+
+    blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
+                                 notification_mask,
+                                 &pd_new->cpumask);
 
-Changing the CPU mask has the look of an expensive operation, though, so it
-probably should not be done with great frequency.
+Here cpumask_change_notifier is the registered notifier, notification_mask
+is one of PADATA_CPU_SERIAL, PADATA_CPU_PARALLEL, and cpumask is a pointer
+to a struct padata_cpumask that contains the new cpumask information.
 
 Actually submitting work to the padata instance requires the creation of a
 padata_priv structure:
@@ -50,7 +105,7 @@ padata_priv structure:
 
 This structure will almost certainly be embedded within some larger
 structure specific to the work to be done. Most of its fields are private to
-padata, but the structure should be zeroed at initialization time, and the
+padata, but the structure should be zeroed at initialisation time, and the
 parallel() and serial() functions should be provided. Those functions will
 be called in the process of getting the work done as we will see
 momentarily.
@@ -63,12 +118,10 @@ The submission of work is done with:
 The pinst and padata structures must be set up as described above; cb_cpu
 specifies which CPU will be used for the final callback when the work is
 done; it must be in the current instance's CPU mask. The return value from
-padata_do_parallel() is a little strange; zero is an error return
-indicating that the caller forgot the padata_start() formalities. -EBUSY
-means that somebody, somewhere else is messing with the instance's CPU
-mask, while -EINVAL is a complaint about cb_cpu not being in that CPU mask.
-If all goes well, this function will return -EINPROGRESS, indicating that
-the work is in progress.
+padata_do_parallel() is zero on success, indicating that the work is in
+progress. -EBUSY means that somebody, somewhere else is messing with the
+instance's CPU mask, while -EINVAL is a complaint about cb_cpu not being
+in that CPU mask or about a non-running instance.
 
 Each task submitted to padata_do_parallel() will, in turn, be passed to
 exactly one call to the above-mentioned parallel() function, on one CPU, so
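
Taken together, the calls above are enough to sketch a minimal padata client.
The following is an illustrative sketch only, grounded in the API as documented
in this file; the my_* names are invented for the example, and the parallel()
callback is assumed to hand its result back with padata_do_serial(), which
queues the serial() callback on cb_cpu in submission order:

    #include <linux/padata.h>
    #include <linux/workqueue.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    struct my_request {
            struct padata_priv padata;      /* must be zeroed before use */
            /* ... per-request payload ... */
    };

    /* Runs on one of the pcpumask CPUs, possibly out of order. */
    static void my_parallel(struct padata_priv *padata)
    {
            /* ... do the expensive work ... */
            padata_do_serial(padata);       /* hand the result back */
    }

    /* Runs on cb_cpu, in the order the requests were submitted. */
    static void my_serial(struct padata_priv *padata)
    {
            /* ... complete the request ... */
    }

    static struct workqueue_struct *my_wq;
    static struct padata_instance *my_pinst;

    static int my_setup(void)
    {
            int err;

            my_wq = create_workqueue("my_padata");
            if (!my_wq)
                    return -ENOMEM;

            my_pinst = padata_alloc_possible(my_wq);
            if (!my_pinst) {
                    destroy_workqueue(my_wq);
                    return -ENOMEM;
            }

            err = padata_start(my_pinst);   /* -EINVAL if no active CPU */
            if (err) {
                    padata_free(my_pinst);
                    destroy_workqueue(my_wq);
            }
            return err;
    }

    static int my_submit(struct my_request *req, int cb_cpu)
    {
            memset(&req->padata, 0, sizeof(req->padata));
            req->padata.parallel = my_parallel;
            req->padata.serial = my_serial;

            return padata_do_parallel(my_pinst, &req->padata, cb_cpu);
    }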
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 6a1157fa4f98..1cf81d77c5a5 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -5,6 +5,6 @@
 obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
-obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
+obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
 obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
diff --git a/arch/s390/crypto/crypto_des.h b/arch/s390/crypto/crypto_des.h
index c964b64111dd..6210457ceebb 100644
--- a/arch/s390/crypto/crypto_des.h
+++ b/arch/s390/crypto/crypto_des.h
@@ -15,4 +15,4 @@
 
 extern int crypto_des_check_key(const u8*, unsigned int, u32*);
 
-#endif //__CRYPTO_DES_H__
+#endif /*__CRYPTO_DES_H__*/
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 2bc479ab3a66..cc5420118393 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,32 +14,21 @@
  *
  */
 
-#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
 
 #include "crypt_s390.h"
-#include "crypto_des.h"
-
-#define DES_BLOCK_SIZE 8
-#define DES_KEY_SIZE 8
-
-#define DES3_128_KEY_SIZE (2 * DES_KEY_SIZE)
-#define DES3_128_BLOCK_SIZE DES_BLOCK_SIZE
 
 #define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE)
-#define DES3_192_BLOCK_SIZE DES_BLOCK_SIZE
 
 struct crypt_s390_des_ctx {
         u8 iv[DES_BLOCK_SIZE];
         u8 key[DES_KEY_SIZE];
 };
 
-struct crypt_s390_des3_128_ctx {
-        u8 iv[DES_BLOCK_SIZE];
-        u8 key[DES3_128_KEY_SIZE];
-};
-
 struct crypt_s390_des3_192_ctx {
         u8 iv[DES_BLOCK_SIZE];
         u8 key[DES3_192_KEY_SIZE];
@@ -50,13 +39,16 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
 {
         struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
         u32 *flags = &tfm->crt_flags;
-        int ret;
+        u32 tmp[DES_EXPKEY_WORDS];
 
-        /* test if key is valid (not a weak key) */
-        ret = crypto_des_check_key(key, keylen, flags);
-        if (ret == 0)
-                memcpy(dctx->key, key, keylen);
-        return ret;
+        /* check for weak keys */
+        if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+                return -EINVAL;
+        }
+
+        memcpy(dctx->key, key, keylen);
+        return 0;
 }
 
 static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -237,165 +229,6 @@ static struct crypto_alg cbc_des_alg = {
  * complementation keys. Any weakness is obviated by the use of
  * multiple keys.
  *
- * However, if the two independent 64-bit keys are equal,
- * then the DES3 operation is simply the same as DES.
- * Implementers MUST reject keys that exhibit this property.
- *
- */
-static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
-                           unsigned int keylen)
-{
-        int i, ret;
-        struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-        const u8 *temp_key = key;
-        u32 *flags = &tfm->crt_flags;
-
-        if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
-            (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
-                return -EINVAL;
-        }
-        for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
-                ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
-                if (ret < 0)
-                        return ret;
-        }
-        memcpy(dctx->key, key, keylen);
-        return 0;
-}
-
-static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-        struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-
-        crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
-                      DES3_128_BLOCK_SIZE);
-}
-
-static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
-        struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
-
-        crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
-                      DES3_128_BLOCK_SIZE);
-}
-
-static struct crypto_alg des3_128_alg = {
-        .cra_name = "des3_ede128",
-        .cra_driver_name = "des3_ede128-s390",
-        .cra_priority = CRYPT_S390_PRIORITY,
-        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-        .cra_blocksize = DES3_128_BLOCK_SIZE,
-        .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
-        .cra_module = THIS_MODULE,
-        .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
-        .cra_u = {
-                .cipher = {
-                        .cia_min_keysize = DES3_128_KEY_SIZE,
-                        .cia_max_keysize = DES3_128_KEY_SIZE,
-                        .cia_setkey = des3_128_setkey,
-                        .cia_encrypt = des3_128_encrypt,
-                        .cia_decrypt = des3_128_decrypt,
-                }
-        }
-};
-
-static int ecb_des3_128_encrypt(struct blkcipher_desc *desc,
-                                struct scatterlist *dst,
-                                struct scatterlist *src, unsigned int nbytes)
-{
-        struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-        struct blkcipher_walk walk;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk);
-}
-
-static int ecb_des3_128_decrypt(struct blkcipher_desc *desc,
-                                struct scatterlist *dst,
-                                struct scatterlist *src, unsigned int nbytes)
-{
-        struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-        struct blkcipher_walk walk;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk);
-}
-
-static struct crypto_alg ecb_des3_128_alg = {
-        .cra_name = "ecb(des3_ede128)",
-        .cra_driver_name = "ecb-des3_ede128-s390",
-        .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
-        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-        .cra_blocksize = DES3_128_BLOCK_SIZE,
-        .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
-        .cra_type = &crypto_blkcipher_type,
-        .cra_module = THIS_MODULE,
-        .cra_list = LIST_HEAD_INIT(
-                        ecb_des3_128_alg.cra_list),
-        .cra_u = {
-                .blkcipher = {
-                        .min_keysize = DES3_128_KEY_SIZE,
-                        .max_keysize = DES3_128_KEY_SIZE,
-                        .setkey = des3_128_setkey,
-                        .encrypt = ecb_des3_128_encrypt,
-                        .decrypt = ecb_des3_128_decrypt,
-                }
-        }
-};
-
-static int cbc_des3_128_encrypt(struct blkcipher_desc *desc,
-                                struct scatterlist *dst,
-                                struct scatterlist *src, unsigned int nbytes)
-{
-        struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-        struct blkcipher_walk walk;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk);
-}
-
-static int cbc_des3_128_decrypt(struct blkcipher_desc *desc,
-                                struct scatterlist *dst,
-                                struct scatterlist *src, unsigned int nbytes)
-{
-        struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-        struct blkcipher_walk walk;
-
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk);
-}
-
-static struct crypto_alg cbc_des3_128_alg = {
-        .cra_name = "cbc(des3_ede128)",
-        .cra_driver_name = "cbc-des3_ede128-s390",
-        .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
-        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-        .cra_blocksize = DES3_128_BLOCK_SIZE,
-        .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
-        .cra_type = &crypto_blkcipher_type,
-        .cra_module = THIS_MODULE,
-        .cra_list = LIST_HEAD_INIT(
-                        cbc_des3_128_alg.cra_list),
-        .cra_u = {
-                .blkcipher = {
-                        .min_keysize = DES3_128_KEY_SIZE,
-                        .max_keysize = DES3_128_KEY_SIZE,
-                        .ivsize = DES3_128_BLOCK_SIZE,
-                        .setkey = des3_128_setkey,
-                        .encrypt = cbc_des3_128_encrypt,
-                        .decrypt = cbc_des3_128_decrypt,
-                }
-        }
-};
-
-/*
- * RFC2451:
- *
- * For DES-EDE3, there is no known need to reject weak or
- * complementation keys. Any weakness is obviated by the use of
- * multiple keys.
- *
  * However, if the first two or last two independent 64-bit keys are
  * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
  * same as DES. Implementers MUST reject keys that exhibit this
@@ -405,9 +238,7 @@ static struct crypto_alg cbc_des3_128_alg = {
 static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
 {
-        int i, ret;
         struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
-        const u8 *temp_key = key;
         u32 *flags = &tfm->crt_flags;
 
         if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
@@ -417,11 +248,6 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
                 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
                 return -EINVAL;
         }
-        for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
-                ret = crypto_des_check_key(temp_key, DES_KEY_SIZE, flags);
-                if (ret < 0)
-                        return ret;
-        }
         memcpy(dctx->key, key, keylen);
         return 0;
 }
@@ -431,7 +257,7 @@ static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
         struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
 
         crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
-                      DES3_192_BLOCK_SIZE);
+                      DES_BLOCK_SIZE);
 }
 
 static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -439,7 +265,7 @@ static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
         struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
 
         crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
-                      DES3_192_BLOCK_SIZE);
+                      DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des3_192_alg = {
@@ -447,7 +273,7 @@ static struct crypto_alg des3_192_alg = {
         .cra_driver_name = "des3_ede-s390",
         .cra_priority = CRYPT_S390_PRIORITY,
         .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
-        .cra_blocksize = DES3_192_BLOCK_SIZE,
+        .cra_blocksize = DES_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
         .cra_module = THIS_MODULE,
         .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
@@ -489,7 +315,7 @@ static struct crypto_alg ecb_des3_192_alg = {
         .cra_driver_name = "ecb-des3_ede-s390",
         .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
         .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-        .cra_blocksize = DES3_192_BLOCK_SIZE,
+        .cra_blocksize = DES_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
         .cra_type = &crypto_blkcipher_type,
         .cra_module = THIS_MODULE,
@@ -533,7 +359,7 @@ static struct crypto_alg cbc_des3_192_alg = {
         .cra_driver_name = "cbc-des3_ede-s390",
         .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
         .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-        .cra_blocksize = DES3_192_BLOCK_SIZE,
+        .cra_blocksize = DES_BLOCK_SIZE,
         .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
         .cra_type = &crypto_blkcipher_type,
         .cra_module = THIS_MODULE,
@@ -543,7 +369,7 @@ static struct crypto_alg cbc_des3_192_alg = {
                 .blkcipher = {
                         .min_keysize = DES3_192_KEY_SIZE,
                         .max_keysize = DES3_192_KEY_SIZE,
-                        .ivsize = DES3_192_BLOCK_SIZE,
+                        .ivsize = DES_BLOCK_SIZE,
                         .setkey = des3_192_setkey,
                         .encrypt = cbc_des3_192_encrypt,
                         .decrypt = cbc_des3_192_decrypt,
@@ -553,10 +379,9 @@
 
 static int des_s390_init(void)
 {
-        int ret = 0;
+        int ret;
 
         if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
-            !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
             !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
                 return -EOPNOTSUPP;
 
@@ -569,17 +394,6 @@ static int des_s390_init(void)
         ret = crypto_register_alg(&cbc_des_alg);
         if (ret)
                 goto cbc_des_err;
-
-        ret = crypto_register_alg(&des3_128_alg);
-        if (ret)
-                goto des3_128_err;
-        ret = crypto_register_alg(&ecb_des3_128_alg);
-        if (ret)
-                goto ecb_des3_128_err;
-        ret = crypto_register_alg(&cbc_des3_128_alg);
-        if (ret)
-                goto cbc_des3_128_err;
-
         ret = crypto_register_alg(&des3_192_alg);
         if (ret)
                 goto des3_192_err;
@@ -589,7 +403,6 @@ static int des_s390_init(void)
         ret = crypto_register_alg(&cbc_des3_192_alg);
         if (ret)
                 goto cbc_des3_192_err;
-
 out:
         return ret;
 
@@ -598,12 +411,6 @@ cbc_des3_192_err:
 ecb_des3_192_err:
         crypto_unregister_alg(&des3_192_alg);
 des3_192_err:
-        crypto_unregister_alg(&cbc_des3_128_alg);
-cbc_des3_128_err:
-        crypto_unregister_alg(&ecb_des3_128_alg);
-ecb_des3_128_err:
-        crypto_unregister_alg(&des3_128_alg);
-des3_128_err:
         crypto_unregister_alg(&cbc_des_alg);
 cbc_des_err:
         crypto_unregister_alg(&ecb_des_alg);
@@ -613,21 +420,18 @@ des_err:
         goto out;
 }
 
-static void __exit des_s390_fini(void)
+static void __exit des_s390_exit(void)
 {
         crypto_unregister_alg(&cbc_des3_192_alg);
         crypto_unregister_alg(&ecb_des3_192_alg);
         crypto_unregister_alg(&des3_192_alg);
-        crypto_unregister_alg(&cbc_des3_128_alg);
-        crypto_unregister_alg(&ecb_des3_128_alg);
-        crypto_unregister_alg(&des3_128_alg);
         crypto_unregister_alg(&cbc_des_alg);
         crypto_unregister_alg(&ecb_des_alg);
         crypto_unregister_alg(&des_alg);
 }
 
 module_init(des_s390_init);
-module_exit(des_s390_fini);
+module_exit(des_s390_exit);
 
 MODULE_ALIAS("des");
 MODULE_ALIAS("des3_ede");
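
The RFC 2451 rationale quoted in the retained comment follows directly from the
EDE construction: C = E_k3(D_k2(E_k1(P))), so when k1 == k2 the inner
decryption cancels the first encryption and only single-DES E_k3 remains
(symmetrically for k2 == k3). A standalone sketch of the degeneracy test that
des3_192_setkey() performs, with the kernel flag handling stripped out
(illustrative only, not kernel code):

    #include <string.h>

    #define DES_KEY_SIZE 8  /* one single-DES key, as in <crypto/des.h> */

    /* Nonzero when a 3DES (EDE) key collapses to single DES,
     * i.e. when k1 == k2 or k2 == k3. */
    static int des3_key_collapses(const unsigned char key[3 * DES_KEY_SIZE])
    {
            const unsigned char *k1 = key;
            const unsigned char *k2 = key + DES_KEY_SIZE;
            const unsigned char *k3 = key + 2 * DES_KEY_SIZE;

            return !memcmp(k1, k2, DES_KEY_SIZE) ||
                   !memcmp(k2, k3, DES_KEY_SIZE);
    }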
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 9d9434f08c92..1cd497d7a15a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -80,6 +80,11 @@ config CRYPTO_RNG2
 
 config CRYPTO_PCOMP
         tristate
+        select CRYPTO_PCOMP2
+        select CRYPTO_ALGAPI
+
+config CRYPTO_PCOMP2
+        tristate
         select CRYPTO_ALGAPI2
 
 config CRYPTO_MANAGER
@@ -94,7 +99,15 @@ config CRYPTO_MANAGER2
         select CRYPTO_AEAD2
         select CRYPTO_HASH2
         select CRYPTO_BLKCIPHER2
-        select CRYPTO_PCOMP
+        select CRYPTO_PCOMP2
+
+config CRYPTO_MANAGER_TESTS
+        bool "Run algorithms' self-tests"
+        default y
+        depends on CRYPTO_MANAGER2
+        help
+          Run cryptomanager's tests for the new crypto algorithms being
+          registered.
 
 config CRYPTO_GF128MUL
         tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
diff --git a/crypto/Makefile b/crypto/Makefile
index d7e6441df7fe..423b7de61f93 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -26,7 +26,7 @@ crypto_hash-objs += ahash.o
 crypto_hash-objs += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
-obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 
 cryptomgr-objs := algboss.o testmgr.o
 
@@ -61,7 +61,7 @@ obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
 obj-$(CONFIG_CRYPTO_DES) += des_generic.o
 obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
-obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
+obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
diff --git a/crypto/algboss.c b/crypto/algboss.c
index c3c196b5823a..40bd391f34d9 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -206,6 +206,7 @@ err:
         return NOTIFY_OK;
 }
 
+#ifdef CONFIG_CRYPTO_MANAGER_TESTS
 static int cryptomgr_test(void *data)
 {
         struct crypto_test_param *param = data;
@@ -266,6 +267,7 @@ err_put_module:
 err:
         return NOTIFY_OK;
 }
+#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
 
 static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
                             void *data)
@@ -273,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
         switch (msg) {
         case CRYPTO_MSG_ALG_REQUEST:
                 return cryptomgr_schedule_probe(data);
+#ifdef CONFIG_CRYPTO_MANAGER_TESTS
         case CRYPTO_MSG_ALG_REGISTER:
                 return cryptomgr_schedule_test(data);
+#endif
         }
 
         return NOTIFY_DONE;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index b9884ee0adb6..a5a22cfcd07b 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -616,7 +616,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
         auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
                               CRYPTO_ALG_TYPE_AHASH_MASK);
         if (IS_ERR(auth))
-                return ERR_PTR(PTR_ERR(auth));
+                return ERR_CAST(auth);
 
         auth_base = &auth->base;
 
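
The ERR_PTR(PTR_ERR(auth)) to ERR_CAST(auth) conversions in this and the later
hunks are cosmetic: both forms return the same error pointer, but ERR_CAST
avoids the pointless pointer-to-long-to-pointer round trip. Modulo the sparse
annotations (__must_check, __force) carried by the real helper in
include/linux/err.h, it amounts to the following sketch:

    /* sketch of the err.h helper; the real one adds sparse annotations */
    static inline void *ERR_CAST(const void *ptr)
    {
            /* cast away the const so callers get their own pointer type */
            return (void *) ptr;
    }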
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 6c3bfabb9d1d..4ca7222cfeb6 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
         alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
                               CRYPTO_ALG_TYPE_MASK);
         if (IS_ERR(alg))
-                return ERR_PTR(PTR_ERR(alg));
+                return ERR_CAST(alg);
 
         /* Block size must be >= 4 bytes. */
         err = -EINVAL;
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 247178cb98ec..de3078215fe6 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -24,12 +24,40 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kobject.h>
+#include <linux/cpu.h>
 #include <crypto/pcrypt.h>
 
-static struct padata_instance *pcrypt_enc_padata;
-static struct padata_instance *pcrypt_dec_padata;
-static struct workqueue_struct *encwq;
-static struct workqueue_struct *decwq;
+struct padata_pcrypt {
+        struct padata_instance *pinst;
+        struct workqueue_struct *wq;
+
+        /*
+         * Cpumask for callback CPUs. It should be
+         * equal to serial cpumask of corresponding padata instance,
+         * so it is updated when padata notifies us about serial
+         * cpumask change.
+         *
+         * cb_cpumask is protected by RCU. This fact prevents us from
+         * using cpumask_var_t directly because the actual type of
+         * cpumask_var_t depends on kernel configuration (particularly on
+         * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
+         * cpumask_var_t may be either a pointer to the struct cpumask
+         * or a variable allocated on the stack. Thus we can not safely use
+         * cpumask_var_t with RCU operations such as rcu_assign_pointer or
+         * rcu_dereference. So cpumask_var_t is wrapped with struct
+         * pcrypt_cpumask, which makes it possible to use it with RCU.
+         */
+        struct pcrypt_cpumask {
+                cpumask_var_t mask;
+        } *cb_cpumask;
+        struct notifier_block nblock;
+};
+
+static struct padata_pcrypt pencrypt;
+static struct padata_pcrypt pdecrypt;
+static struct kset *pcrypt_kset;
 
 struct pcrypt_instance_ctx {
         struct crypto_spawn spawn;
@@ -42,25 +70,32 @@ struct pcrypt_aead_ctx {
 };
 
 static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
-                              struct padata_instance *pinst)
+                              struct padata_pcrypt *pcrypt)
 {
         unsigned int cpu_index, cpu, i;
+        struct pcrypt_cpumask *cpumask;
 
         cpu = *cb_cpu;
 
-        if (cpumask_test_cpu(cpu, cpu_active_mask))
+        rcu_read_lock_bh();
+        cpumask = rcu_dereference(pcrypt->cb_cpumask);
+        if (cpumask_test_cpu(cpu, cpumask->mask))
+                goto out;
+
+        if (!cpumask_weight(cpumask->mask))
                 goto out;
 
-        cpu_index = cpu % cpumask_weight(cpu_active_mask);
+        cpu_index = cpu % cpumask_weight(cpumask->mask);
 
-        cpu = cpumask_first(cpu_active_mask);
+        cpu = cpumask_first(cpumask->mask);
         for (i = 0; i < cpu_index; i++)
-                cpu = cpumask_next(cpu, cpu_active_mask);
+                cpu = cpumask_next(cpu, cpumask->mask);
 
         *cb_cpu = cpu;
 
 out:
-        return padata_do_parallel(pinst, padata, cpu);
+        rcu_read_unlock_bh();
+        return padata_do_parallel(pcrypt->pinst, padata, cpu);
 }
 
 static int pcrypt_aead_setkey(struct crypto_aead *parent,
@@ -142,11 +177,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
                                req->cryptlen, req->iv);
         aead_request_set_assoc(creq, req->assoc, req->assoclen);
 
-        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
-        if (err)
-                return err;
-        else
-                err = crypto_aead_encrypt(creq);
+        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+        if (!err)
+                return -EINPROGRESS;
 
         return err;
 }
@@ -186,11 +219,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
                                req->cryptlen, req->iv);
         aead_request_set_assoc(creq, req->assoc, req->assoclen);
 
-        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
-        if (err)
-                return err;
-        else
-                err = crypto_aead_decrypt(creq);
+        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
+        if (!err)
+                return -EINPROGRESS;
 
         return err;
 }
@@ -232,11 +263,9 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
         aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
         aead_givcrypt_set_giv(creq, req->giv, req->seq);
 
-        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
-        if (err)
-                return err;
-        else
-                err = crypto_aead_givencrypt(creq);
+        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+        if (!err)
+                return -EINPROGRESS;
 
         return err;
 }
@@ -376,6 +405,115 @@ static void pcrypt_free(struct crypto_instance *inst)
         kfree(inst);
 }
 
+static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+                                        unsigned long val, void *data)
+{
+        struct padata_pcrypt *pcrypt;
+        struct pcrypt_cpumask *new_mask, *old_mask;
+        struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
+
+        if (!(val & PADATA_CPU_SERIAL))
+                return 0;
+
+        pcrypt = container_of(self, struct padata_pcrypt, nblock);
+        new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
+        if (!new_mask)
+                return -ENOMEM;
+        if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
+                kfree(new_mask);
+                return -ENOMEM;
+        }
+
+        old_mask = pcrypt->cb_cpumask;
+
+        cpumask_copy(new_mask->mask, cpumask->cbcpu);
+        rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
+        synchronize_rcu_bh();
+
+        free_cpumask_var(old_mask->mask);
+        kfree(old_mask);
+        return 0;
+}
+
+static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+{
+        int ret;
+
+        pinst->kobj.kset = pcrypt_kset;
+        ret = kobject_add(&pinst->kobj, NULL, name);
+        if (!ret)
+                kobject_uevent(&pinst->kobj, KOBJ_ADD);
+
+        return ret;
+}
+
+static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
+                              const char *name)
+{
+        int ret = -ENOMEM;
+        struct pcrypt_cpumask *mask;
+
+        get_online_cpus();
+
+        pcrypt->wq = create_workqueue(name);
+        if (!pcrypt->wq)
+                goto err;
+
+        pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
+        if (!pcrypt->pinst)
+                goto err_destroy_workqueue;
+
+        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+        if (!mask)
+                goto err_free_padata;
+        if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
+                kfree(mask);
+                goto err_free_padata;
+        }
+
+        cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+        rcu_assign_pointer(pcrypt->cb_cpumask, mask);
+
+        pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
+        ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+        if (ret)
+                goto err_free_cpumask;
+
+        ret = pcrypt_sysfs_add(pcrypt->pinst, name);
+        if (ret)
+                goto err_unregister_notifier;
+
+        put_online_cpus();
+
+        return ret;
+
+err_unregister_notifier:
+        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+err_free_cpumask:
+        free_cpumask_var(mask->mask);
+        kfree(mask);
+err_free_padata:
+        padata_free(pcrypt->pinst);
+err_destroy_workqueue:
+        destroy_workqueue(pcrypt->wq);
+err:
+        put_online_cpus();
+
+        return ret;
+}
+
+static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+{
+        kobject_put(&pcrypt->pinst->kobj);
+        free_cpumask_var(pcrypt->cb_cpumask->mask);
+        kfree(pcrypt->cb_cpumask);
+
+        padata_stop(pcrypt->pinst);
+        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+        destroy_workqueue(pcrypt->wq);
+        padata_free(pcrypt->pinst);
+}
+
 static struct crypto_template pcrypt_tmpl = {
         .name = "pcrypt",
         .alloc = pcrypt_alloc,
@@ -385,52 +523,39 @@ static struct crypto_template pcrypt_tmpl = {
 
 static int __init pcrypt_init(void)
 {
-        encwq = create_workqueue("pencrypt");
-        if (!encwq)
-                goto err;
-
-        decwq = create_workqueue("pdecrypt");
-        if (!decwq)
-                goto err_destroy_encwq;
+        int err = -ENOMEM;
 
+        pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
+        if (!pcrypt_kset)
+                goto err;
 
-        pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
-        if (!pcrypt_enc_padata)
-                goto err_destroy_decwq;
+        err = pcrypt_init_padata(&pencrypt, "pencrypt");
+        if (err)
+                goto err_unreg_kset;
 
-        pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
-        if (!pcrypt_dec_padata)
-                goto err_free_padata;
+        err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
+        if (err)
+                goto err_deinit_pencrypt;
 
-        padata_start(pcrypt_enc_padata);
-        padata_start(pcrypt_dec_padata);
+        padata_start(pencrypt.pinst);
+        padata_start(pdecrypt.pinst);
 
         return crypto_register_template(&pcrypt_tmpl);
 
-err_free_padata:
-        padata_free(pcrypt_enc_padata);
-
-err_destroy_decwq:
-        destroy_workqueue(decwq);
-
-err_destroy_encwq:
-        destroy_workqueue(encwq);
-
+err_deinit_pencrypt:
+        pcrypt_fini_padata(&pencrypt);
+err_unreg_kset:
+        kset_unregister(pcrypt_kset);
 err:
-        return -ENOMEM;
+        return err;
 }
 
 static void __exit pcrypt_exit(void)
 {
-        padata_stop(pcrypt_enc_padata);
-        padata_stop(pcrypt_dec_padata);
-
-        destroy_workqueue(encwq);
-        destroy_workqueue(decwq);
-
-        padata_free(pcrypt_enc_padata);
-        padata_free(pcrypt_dec_padata);
+        pcrypt_fini_padata(&pencrypt);
+        pcrypt_fini_padata(&pdecrypt);
 
+        kset_unregister(pcrypt_kset);
         crypto_unregister_template(&pcrypt_tmpl);
 }
 
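
Nothing in this rework changes how pcrypt is consumed: it is still a crypto
template instantiated by wrapping an existing AEAD algorithm's name. A hedged
usage sketch (the wrapped algorithm string is only an example):

    #include <crypto/aead.h>
    #include <linux/err.h>

    static int pcrypt_usage_example(void)
    {
            struct crypto_aead *tfm;

            /* parallelize an existing AEAD by wrapping it in pcrypt() */
            tfm = crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))",
                                    0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* ... set the key and issue AEAD requests as usual ... */

            crypto_free_aead(tfm);
            return 0;
    }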
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5c8aaa0cb0b9..abd980c729eb 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -22,6 +22,17 @@
 #include <crypto/rng.h>
 
 #include "internal.h"
+
+#ifndef CONFIG_CRYPTO_MANAGER_TESTS
+
+/* a perfect nop */
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
+{
+        return 0;
+}
+
+#else
+
 #include "testmgr.h"
 
 /*
@@ -2530,4 +2541,7 @@ notest:
 non_fips_alg:
         return -EINVAL;
 }
+
+#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
+
 EXPORT_SYMBOL_GPL(alg_test);
diff --git a/crypto/twofish.c b/crypto/twofish_generic.c
index dfcda231f87a..1f07b843e07c 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
+MODULE_ALIAS("twofish");
diff --git a/crypto/xts.c b/crypto/xts.c
index d87b0f3102c3..555ecaab1e54 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
         alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
                                   CRYPTO_ALG_TYPE_MASK);
         if (IS_ERR(alg))
-                return ERR_PTR(PTR_ERR(alg));
+                return ERR_CAST(alg);
 
         inst = crypto_alloc_instance("xts", alg);
         if (IS_ERR(inst))
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 0f9cbf1aaf15..101d5f235547 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
 
 static int n2rng_data_read(struct hwrng *rng, u32 *data)
 {
-        struct n2rng *np = (struct n2rng *) rng->priv;
+        struct n2rng *np = rng->priv;
         unsigned long ra = __pa(&np->test_data);
         int len;
 
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8d85587b6d4f..caef35a46890 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -407,8 +407,8 @@ struct entropy_store {
         struct poolinfo *poolinfo;
         __u32 *pool;
         const char *name;
-        int limit;
         struct entropy_store *pull;
+        int limit;
 
         /* read-write data: */
         spinlock_t lock;
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 09389dd2f96b..219d09cbb0d1 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -573,7 +573,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 }
 
 static struct pci_device_id geode_aes_tbl[] = {
-        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
         { 0, }
 };
 
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 16fce3aadf4d..e449ac5627a5 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2018,7 +2018,6 @@ static void hifn_flush(struct hifn_device *dev)
 {
         unsigned long flags;
         struct crypto_async_request *async_req;
-        struct hifn_context *ctx;
         struct ablkcipher_request *req;
         struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
         int i;
@@ -2035,7 +2034,6 @@ static void hifn_flush(struct hifn_device *dev)
 
         spin_lock_irqsave(&dev->lock, flags);
         while ((async_req = crypto_dequeue_request(&dev->queue))) {
-                ctx = crypto_tfm_ctx(async_req->tfm);
                 req = container_of(async_req, struct ablkcipher_request, base);
                 spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -2139,7 +2137,6 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 static int hifn_process_queue(struct hifn_device *dev)
 {
         struct crypto_async_request *async_req, *backlog;
-        struct hifn_context *ctx;
         struct ablkcipher_request *req;
         unsigned long flags;
         int err = 0;
@@ -2156,7 +2153,6 @@ static int hifn_process_queue(struct hifn_device *dev)
         if (backlog)
                 backlog->complete(backlog, -EINPROGRESS);
 
-        ctx = crypto_tfm_ctx(async_req->tfm);
         req = container_of(async_req, struct ablkcipher_request, base);
 
         err = hifn_handle_req(req);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e095422b58dd..7d279e578df5 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1055,20 +1055,20 @@ static int mv_probe(struct platform_device *pdev)
         cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
         if (IS_ERR(cp->queue_th)) {
                 ret = PTR_ERR(cp->queue_th);
-                goto err_thread;
+                goto err_unmap_sram;
         }
 
         ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
                         cp);
         if (ret)
-                goto err_unmap_sram;
+                goto err_thread;
 
         writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
         writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
 
         ret = crypto_register_alg(&mv_aes_alg_ecb);
         if (ret)
-                goto err_reg;
+                goto err_irq;
 
         ret = crypto_register_alg(&mv_aes_alg_cbc);
         if (ret)
@@ -1091,9 +1091,9 @@ static int mv_probe(struct platform_device *pdev)
         return 0;
 err_unreg_ecb:
         crypto_unregister_alg(&mv_aes_alg_ecb);
-err_thread:
+err_irq:
         free_irq(irq, cp);
-err_reg:
+err_thread:
         kthread_stop(cp->queue_th);
 err_unmap_sram:
         iounmap(cp->sram);
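
The relabelling above is the standard probe-path rule: each error label is
named for the last resource successfully acquired, and the labels unwind in
the reverse order of acquisition. A generic sketch of the pattern the fix
restores (all names illustrative):

    /* illustrative stubs standing in for kthread_run/request_irq/... */
    static int start_thread(void) { return 0; }
    static void stop_thread(void) { }
    static int grab_irq(void) { return 0; }
    static void release_irq(void) { }
    static int register_algs(void) { return 0; }

    static int probe_example(void)
    {
            int ret;

            ret = start_thread();
            if (ret)
                    goto err;          /* nothing acquired yet */

            ret = grab_irq();
            if (ret)
                    goto err_thread;   /* only the thread exists */

            ret = register_algs();
            if (ret)
                    goto err_irq;      /* thread and irq both exist */

            return 0;

    err_irq:
            release_irq();
    err_thread:
            stop_thread();
    err:
            return ret;
    }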
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 23163fda5035..b99c38f23d61 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -239,21 +239,57 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
 }
 #endif
 
-struct n2_base_ctx {
-        struct list_head list;
+struct n2_ahash_alg {
+        struct list_head entry;
+        const char *hash_zero;
+        const u32 *hash_init;
+        u8 hw_op_hashsz;
+        u8 digest_size;
+        u8 auth_type;
+        u8 hmac_type;
+        struct ahash_alg alg;
 };
 
-static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
 {
-        INIT_LIST_HEAD(&ctx->list);
+        struct crypto_alg *alg = tfm->__crt_alg;
+        struct ahash_alg *ahash_alg;
+
+        ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+        return container_of(ahash_alg, struct n2_ahash_alg, alg);
 }
 
-struct n2_hash_ctx {
-        struct n2_base_ctx base;
+struct n2_hmac_alg {
+        const char *child_alg;
+        struct n2_ahash_alg derived;
+};
+
+static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
+{
+        struct crypto_alg *alg = tfm->__crt_alg;
+        struct ahash_alg *ahash_alg;
+
+        ahash_alg = container_of(alg, struct ahash_alg, halg.base);
+
+        return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
+}
 
+struct n2_hash_ctx {
         struct crypto_ahash *fallback_tfm;
 };
 
+#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
+
+struct n2_hmac_ctx {
+        struct n2_hash_ctx base;
+
+        struct crypto_shash *child_shash;
+
+        int hash_key_len;
+        unsigned char hash_key[N2_HASH_KEY_MAX];
+};
+
 struct n2_hash_req_ctx {
         union {
                 struct md5_state md5;
@@ -261,9 +297,6 @@ struct n2_hash_req_ctx {
                 struct sha256_state sha256;
         } u;
 
-        unsigned char hash_key[64];
-        unsigned char keyed_zero_hash[32];
-
         struct ahash_request fallback_req;
 };
 
@@ -356,6 +389,94 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
         crypto_free_ahash(ctx->fallback_tfm);
 }
 
+static int n2_hmac_cra_init(struct crypto_tfm *tfm)
+{
+        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
+        struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
+        struct crypto_ahash *fallback_tfm;
+        struct crypto_shash *child_shash;
+        int err;
+
+        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+                                          CRYPTO_ALG_NEED_FALLBACK);
+        if (IS_ERR(fallback_tfm)) {
+                pr_warning("Fallback driver '%s' could not be loaded!\n",
+                           fallback_driver_name);
+                err = PTR_ERR(fallback_tfm);
+                goto out;
+        }
+
+        child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
410 | |||
411 | child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); | ||
412 | if (IS_ERR(child_shash)) { | ||
413 | pr_warning("Child shash '%s' could not be loaded!\n", | ||
414 | n2alg->child_alg); | ||
415 | err = PTR_ERR(child_shash); | ||
416 | goto out_free_fallback; | ||
417 | } | ||
418 | |||
419 | crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + | ||
420 | crypto_ahash_reqsize(fallback_tfm))); | ||
421 | |||
422 | ctx->child_shash = child_shash; | ||
423 | ctx->base.fallback_tfm = fallback_tfm; | ||
424 | return 0; | ||
425 | |||
426 | out_free_fallback: | ||
427 | crypto_free_ahash(fallback_tfm); | ||
428 | |||
429 | out: | ||
430 | return err; | ||
431 | } | ||
432 | |||
433 | static void n2_hmac_cra_exit(struct crypto_tfm *tfm) | ||
434 | { | ||
435 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | ||
436 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); | ||
437 | |||
438 | crypto_free_ahash(ctx->base.fallback_tfm); | ||
439 | crypto_free_shash(ctx->child_shash); | ||
440 | } | ||
441 | |||
442 | static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
443 | unsigned int keylen) | ||
444 | { | ||
445 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); | ||
446 | struct crypto_shash *child_shash = ctx->child_shash; | ||
447 | struct crypto_ahash *fallback_tfm; | ||
448 | struct { | ||
449 | struct shash_desc shash; | ||
450 | char ctx[crypto_shash_descsize(child_shash)]; | ||
451 | } desc; | ||
452 | int err, bs, ds; | ||
453 | |||
454 | fallback_tfm = ctx->base.fallback_tfm; | ||
455 | err = crypto_ahash_setkey(fallback_tfm, key, keylen); | ||
456 | if (err) | ||
457 | return err; | ||
458 | |||
459 | desc.shash.tfm = child_shash; | ||
460 | desc.shash.flags = crypto_ahash_get_flags(tfm) & | ||
461 | CRYPTO_TFM_REQ_MAY_SLEEP; | ||
462 | |||
463 | bs = crypto_shash_blocksize(child_shash); | ||
464 | ds = crypto_shash_digestsize(child_shash); | ||
465 | BUG_ON(ds > N2_HASH_KEY_MAX); | ||
466 | if (keylen > bs) { | ||
467 | err = crypto_shash_digest(&desc.shash, key, keylen, | ||
468 | ctx->hash_key); | ||
469 | if (err) | ||
470 | return err; | ||
471 | keylen = ds; | ||
472 | } else if (keylen <= N2_HASH_KEY_MAX) | ||
473 | memcpy(ctx->hash_key, key, keylen); | ||
474 | |||
475 | ctx->hash_key_len = keylen; | ||
476 | |||
477 | return err; | ||
478 | } | ||
479 | |||
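n2_hmac_async_setkey follows the standard HMAC key-preprocessing rule: a key longer than the hash block size is first digested down to digest size; a shorter key is stored verbatim; and anything still larger than the 32-byte hardware limit is simply recorded, so the digest path later diverts such requests to the software fallback. A runnable sketch of just the length logic, where digest_stub is an explicit placeholder, not a real hash:

	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE  64	/* e.g. SHA-256 block size */
	#define DIGEST_SIZE 32	/* e.g. SHA-256 digest size */
	#define KEY_MAX     32	/* mirrors N2_HASH_KEY_MAX */

	/* Placeholder: models only the length reduction, not real hashing. */
	static void digest_stub(const unsigned char *in, size_t len,
				unsigned char *out)
	{
		(void)in; (void)len;
		memset(out, 0, DIGEST_SIZE);
	}

	static size_t hmac_prepare_key(const unsigned char *key, size_t keylen,
				       unsigned char *hash_key)
	{
		if (keylen > BLOCK_SIZE) {	/* long keys are hashed down */
			digest_stub(key, keylen, hash_key);
			return DIGEST_SIZE;
		}
		if (keylen <= KEY_MAX)		/* short keys used as-is */
			memcpy(hash_key, key, keylen);
		return keylen;	/* a result > KEY_MAX forces the fallback */
	}

	int main(void)
	{
		unsigned char out[KEY_MAX];
		printf("%zu\n",
		       hmac_prepare_key((const unsigned char *)"secret", 6, out));
		return 0;
	}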
359 | static unsigned long wait_for_tail(struct spu_queue *qp) | 480 | static unsigned long wait_for_tail(struct spu_queue *qp) |
360 | { | 481 | { |
361 | unsigned long head, hv_ret; | 482 | unsigned long head, hv_ret; |
@@ -385,12 +506,12 @@ static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, | |||
385 | return hv_ret; | 506 | return hv_ret; |
386 | } | 507 | } |
387 | 508 | ||
388 | static int n2_hash_async_digest(struct ahash_request *req, | 509 | static int n2_do_async_digest(struct ahash_request *req, |
389 | unsigned int auth_type, unsigned int digest_size, | 510 | unsigned int auth_type, unsigned int digest_size, |
390 | unsigned int result_size, void *hash_loc) | 511 | unsigned int result_size, void *hash_loc, |
512 | unsigned long auth_key, unsigned int auth_key_len) | ||
391 | { | 513 | { |
392 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 514 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
393 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
394 | struct cwq_initial_entry *ent; | 515 | struct cwq_initial_entry *ent; |
395 | struct crypto_hash_walk walk; | 516 | struct crypto_hash_walk walk; |
396 | struct spu_queue *qp; | 517 | struct spu_queue *qp; |
@@ -403,6 +524,7 @@ static int n2_hash_async_digest(struct ahash_request *req, | |||
403 | */ | 524 | */ |
404 | if (unlikely(req->nbytes > (1 << 16))) { | 525 | if (unlikely(req->nbytes > (1 << 16))) { |
405 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | 526 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
527 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
406 | 528 | ||
407 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); | 529 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); |
408 | rctx->fallback_req.base.flags = | 530 | rctx->fallback_req.base.flags = |
@@ -414,8 +536,6 @@ static int n2_hash_async_digest(struct ahash_request *req, | |||
414 | return crypto_ahash_digest(&rctx->fallback_req); | 536 | return crypto_ahash_digest(&rctx->fallback_req); |
415 | } | 537 | } |
416 | 538 | ||
417 | n2_base_ctx_init(&ctx->base); | ||
418 | |||
419 | nbytes = crypto_hash_walk_first(req, &walk); | 539 | nbytes = crypto_hash_walk_first(req, &walk); |
420 | 540 | ||
421 | cpu = get_cpu(); | 541 | cpu = get_cpu(); |
@@ -430,13 +550,13 @@ static int n2_hash_async_digest(struct ahash_request *req, | |||
430 | */ | 550 | */ |
431 | ent = qp->q + qp->tail; | 551 | ent = qp->q + qp->tail; |
432 | 552 | ||
433 | ent->control = control_word_base(nbytes, 0, 0, | 553 | ent->control = control_word_base(nbytes, auth_key_len, 0, |
434 | auth_type, digest_size, | 554 | auth_type, digest_size, |
435 | false, true, false, false, | 555 | false, true, false, false, |
436 | OPCODE_INPLACE_BIT | | 556 | OPCODE_INPLACE_BIT | |
437 | OPCODE_AUTH_MAC); | 557 | OPCODE_AUTH_MAC); |
438 | ent->src_addr = __pa(walk.data); | 558 | ent->src_addr = __pa(walk.data); |
439 | ent->auth_key_addr = 0UL; | 559 | ent->auth_key_addr = auth_key; |
440 | ent->auth_iv_addr = __pa(hash_loc); | 560 | ent->auth_iv_addr = __pa(hash_loc); |
441 | ent->final_auth_state_addr = 0UL; | 561 | ent->final_auth_state_addr = 0UL; |
442 | ent->enc_key_addr = 0UL; | 562 | ent->enc_key_addr = 0UL; |
@@ -475,114 +595,55 @@ out: | |||
475 | return err; | 595 | return err; |
476 | } | 596 | } |
477 | 597 | ||
478 | static int n2_md5_async_digest(struct ahash_request *req) | 598 | static int n2_hash_async_digest(struct ahash_request *req) |
479 | { | 599 | { |
600 | struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); | ||
480 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | 601 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
481 | struct md5_state *m = &rctx->u.md5; | 602 | int ds; |
482 | 603 | ||
604 | ds = n2alg->digest_size; | ||
483 | if (unlikely(req->nbytes == 0)) { | 605 | if (unlikely(req->nbytes == 0)) { |
484 | static const char md5_zero[MD5_DIGEST_SIZE] = { | 606 | memcpy(req->result, n2alg->hash_zero, ds); |
485 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, | ||
486 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | ||
487 | }; | ||
488 | |||
489 | memcpy(req->result, md5_zero, MD5_DIGEST_SIZE); | ||
490 | return 0; | 607 | return 0; |
491 | } | 608 | } |
492 | m->hash[0] = cpu_to_le32(0x67452301); | 609 | memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); |
493 | m->hash[1] = cpu_to_le32(0xefcdab89); | ||
494 | m->hash[2] = cpu_to_le32(0x98badcfe); | ||
495 | m->hash[3] = cpu_to_le32(0x10325476); | ||
496 | 610 | ||
497 | return n2_hash_async_digest(req, AUTH_TYPE_MD5, | 611 | return n2_do_async_digest(req, n2alg->auth_type, |
498 | MD5_DIGEST_SIZE, MD5_DIGEST_SIZE, | 612 | n2alg->hw_op_hashsz, ds, |
499 | m->hash); | 613 | &rctx->u, 0UL, 0); |
500 | } | 614 | } |
501 | 615 | ||
502 | static int n2_sha1_async_digest(struct ahash_request *req) | 616 | static int n2_hmac_async_digest(struct ahash_request *req) |
503 | { | 617 | { |
618 | struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); | ||
504 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | 619 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
505 | struct sha1_state *s = &rctx->u.sha1; | 620 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
506 | 621 | struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); | |
507 | if (unlikely(req->nbytes == 0)) { | 622 | int ds; |
508 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | ||
509 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | ||
510 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, | ||
511 | 0x07, 0x09 | ||
512 | }; | ||
513 | |||
514 | memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE); | ||
515 | return 0; | ||
516 | } | ||
517 | s->state[0] = SHA1_H0; | ||
518 | s->state[1] = SHA1_H1; | ||
519 | s->state[2] = SHA1_H2; | ||
520 | s->state[3] = SHA1_H3; | ||
521 | s->state[4] = SHA1_H4; | ||
522 | |||
523 | return n2_hash_async_digest(req, AUTH_TYPE_SHA1, | ||
524 | SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE, | ||
525 | s->state); | ||
526 | } | ||
527 | |||
528 | static int n2_sha256_async_digest(struct ahash_request *req) | ||
529 | { | ||
530 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | ||
531 | struct sha256_state *s = &rctx->u.sha256; | ||
532 | |||
533 | if (req->nbytes == 0) { | ||
534 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | ||
535 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, | ||
536 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, | ||
537 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, | ||
538 | 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
539 | }; | ||
540 | |||
541 | memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE); | ||
542 | return 0; | ||
543 | } | ||
544 | s->state[0] = SHA256_H0; | ||
545 | s->state[1] = SHA256_H1; | ||
546 | s->state[2] = SHA256_H2; | ||
547 | s->state[3] = SHA256_H3; | ||
548 | s->state[4] = SHA256_H4; | ||
549 | s->state[5] = SHA256_H5; | ||
550 | s->state[6] = SHA256_H6; | ||
551 | s->state[7] = SHA256_H7; | ||
552 | |||
553 | return n2_hash_async_digest(req, AUTH_TYPE_SHA256, | ||
554 | SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE, | ||
555 | s->state); | ||
556 | } | ||
557 | 623 | ||
558 | static int n2_sha224_async_digest(struct ahash_request *req) | 624 | ds = n2alg->derived.digest_size; |
559 | { | 625 | if (unlikely(req->nbytes == 0) || |
560 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); | 626 | unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { |
561 | struct sha256_state *s = &rctx->u.sha256; | 627 | struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); |
628 | struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
562 | 629 | ||
563 | if (req->nbytes == 0) { | 630 | ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); |
564 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | 631 | rctx->fallback_req.base.flags = |
565 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, | 632 | req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
566 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, | 633 | rctx->fallback_req.nbytes = req->nbytes; |
567 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, | 634 | rctx->fallback_req.src = req->src; |
568 | 0x2f | 635 | rctx->fallback_req.result = req->result; |
569 | }; | ||
570 | 636 | ||
571 | memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE); | 637 | return crypto_ahash_digest(&rctx->fallback_req); |
572 | return 0; | ||
573 | } | 638 | } |
574 | s->state[0] = SHA224_H0; | 639 | memcpy(&rctx->u, n2alg->derived.hash_init, |
575 | s->state[1] = SHA224_H1; | 640 | n2alg->derived.hw_op_hashsz); |
576 | s->state[2] = SHA224_H2; | ||
577 | s->state[3] = SHA224_H3; | ||
578 | s->state[4] = SHA224_H4; | ||
579 | s->state[5] = SHA224_H5; | ||
580 | s->state[6] = SHA224_H6; | ||
581 | s->state[7] = SHA224_H7; | ||
582 | 641 | ||
583 | return n2_hash_async_digest(req, AUTH_TYPE_SHA256, | 642 | return n2_do_async_digest(req, n2alg->derived.hmac_type, |
584 | SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE, | 643 | n2alg->derived.hw_op_hashsz, ds, |
585 | s->state); | 644 | &rctx->u, |
645 | __pa(&ctx->hash_key), | ||
646 | ctx->hash_key_len); | ||
586 | } | 647 | } |
587 | 648 | ||
588 | struct n2_cipher_context { | 649 | struct n2_cipher_context { |
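The four per-algorithm digest entry points collapse into one generic n2_hash_async_digest: the digest of a zero-length message is a per-algorithm constant, so it is served straight from the template's hash_zero table without touching the hardware; otherwise the template's initial state is copied into the request context before submission. A runnable sketch of the zero-length shortcut for MD5 (the constant matches the driver's md5_zero table; the hardware path is not modeled):

	#include <stdio.h>
	#include <string.h>

	#define MD5_DIGEST_SIZE 16

	/* MD5 of the empty message, as in the driver's md5_zero table. */
	static const unsigned char md5_zero[MD5_DIGEST_SIZE] = {
		0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
		0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
	};

	/* Zero-length requests never reach the hardware queue. */
	static int digest_sketch(size_t nbytes, unsigned char *result)
	{
		if (nbytes == 0) {
			memcpy(result, md5_zero, MD5_DIGEST_SIZE);
			return 0;
		}
		return -1;	/* hardware submission path omitted */
	}

	int main(void)
	{
		unsigned char out[MD5_DIGEST_SIZE];
		if (digest_sketch(0, out) == 0)
			printf("empty-message digest starts 0x%02x\n", out[0]);
		return 0;
	}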
@@ -1209,35 +1270,92 @@ static LIST_HEAD(cipher_algs); | |||
1209 | 1270 | ||
1210 | struct n2_hash_tmpl { | 1271 | struct n2_hash_tmpl { |
1211 | const char *name; | 1272 | const char *name; |
1212 | int (*digest)(struct ahash_request *req); | 1273 | const char *hash_zero; |
1274 | const u32 *hash_init; | ||
1275 | u8 hw_op_hashsz; | ||
1213 | u8 digest_size; | 1276 | u8 digest_size; |
1214 | u8 block_size; | 1277 | u8 block_size; |
1278 | u8 auth_type; | ||
1279 | u8 hmac_type; | ||
1280 | }; | ||
1281 | |||
1282 | static const char md5_zero[MD5_DIGEST_SIZE] = { | ||
1283 | 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, | ||
1284 | 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, | ||
1285 | }; | ||
1286 | static const u32 md5_init[MD5_HASH_WORDS] = { | ||
1287 | cpu_to_le32(0x67452301), | ||
1288 | cpu_to_le32(0xefcdab89), | ||
1289 | cpu_to_le32(0x98badcfe), | ||
1290 | cpu_to_le32(0x10325476), | ||
1291 | }; | ||
1292 | static const char sha1_zero[SHA1_DIGEST_SIZE] = { | ||
1293 | 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, | ||
1294 | 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, | ||
1295 | 0x07, 0x09 | ||
1215 | }; | 1296 | }; |
1297 | static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { | ||
1298 | SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, | ||
1299 | }; | ||
1300 | static const char sha256_zero[SHA256_DIGEST_SIZE] = { | ||
1301 | 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, | ||
1302 | 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, | ||
1303 | 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, | ||
1304 | 0x1b, 0x78, 0x52, 0xb8, 0x55 | ||
1305 | }; | ||
1306 | static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { | ||
1307 | SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, | ||
1308 | SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, | ||
1309 | }; | ||
1310 | static const char sha224_zero[SHA224_DIGEST_SIZE] = { | ||
1311 | 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, | ||
1312 | 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, | ||
1313 | 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, | ||
1314 | 0x2f | ||
1315 | }; | ||
1316 | static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { | ||
1317 | SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, | ||
1318 | SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, | ||
1319 | }; | ||
1320 | |||
1216 | static const struct n2_hash_tmpl hash_tmpls[] = { | 1321 | static const struct n2_hash_tmpl hash_tmpls[] = { |
1217 | { .name = "md5", | 1322 | { .name = "md5", |
1218 | .digest = n2_md5_async_digest, | 1323 | .hash_zero = md5_zero, |
1324 | .hash_init = md5_init, | ||
1325 | .auth_type = AUTH_TYPE_MD5, | ||
1326 | .hmac_type = AUTH_TYPE_HMAC_MD5, | ||
1327 | .hw_op_hashsz = MD5_DIGEST_SIZE, | ||
1219 | .digest_size = MD5_DIGEST_SIZE, | 1328 | .digest_size = MD5_DIGEST_SIZE, |
1220 | .block_size = MD5_HMAC_BLOCK_SIZE }, | 1329 | .block_size = MD5_HMAC_BLOCK_SIZE }, |
1221 | { .name = "sha1", | 1330 | { .name = "sha1", |
1222 | .digest = n2_sha1_async_digest, | 1331 | .hash_zero = sha1_zero, |
1332 | .hash_init = sha1_init, | ||
1333 | .auth_type = AUTH_TYPE_SHA1, | ||
1334 | .hmac_type = AUTH_TYPE_HMAC_SHA1, | ||
1335 | .hw_op_hashsz = SHA1_DIGEST_SIZE, | ||
1223 | .digest_size = SHA1_DIGEST_SIZE, | 1336 | .digest_size = SHA1_DIGEST_SIZE, |
1224 | .block_size = SHA1_BLOCK_SIZE }, | 1337 | .block_size = SHA1_BLOCK_SIZE }, |
1225 | { .name = "sha256", | 1338 | { .name = "sha256", |
1226 | .digest = n2_sha256_async_digest, | 1339 | .hash_zero = sha256_zero, |
1340 | .hash_init = sha256_init, | ||
1341 | .auth_type = AUTH_TYPE_SHA256, | ||
1342 | .hmac_type = AUTH_TYPE_HMAC_SHA256, | ||
1343 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | ||
1227 | .digest_size = SHA256_DIGEST_SIZE, | 1344 | .digest_size = SHA256_DIGEST_SIZE, |
1228 | .block_size = SHA256_BLOCK_SIZE }, | 1345 | .block_size = SHA256_BLOCK_SIZE }, |
1229 | { .name = "sha224", | 1346 | { .name = "sha224", |
1230 | .digest = n2_sha224_async_digest, | 1347 | .hash_zero = sha224_zero, |
1348 | .hash_init = sha224_init, | ||
1349 | .auth_type = AUTH_TYPE_SHA256, | ||
1350 | .hmac_type = AUTH_TYPE_RESERVED, | ||
1351 | .hw_op_hashsz = SHA256_DIGEST_SIZE, | ||
1231 | .digest_size = SHA224_DIGEST_SIZE, | 1352 | .digest_size = SHA224_DIGEST_SIZE, |
1232 | .block_size = SHA224_BLOCK_SIZE }, | 1353 | .block_size = SHA224_BLOCK_SIZE }, |
1233 | }; | 1354 | }; |
1234 | #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) | 1355 | #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) |
1235 | 1356 | ||
1236 | struct n2_ahash_alg { | ||
1237 | struct list_head entry; | ||
1238 | struct ahash_alg alg; | ||
1239 | }; | ||
1240 | static LIST_HEAD(ahash_algs); | 1357 | static LIST_HEAD(ahash_algs); |
1358 | static LIST_HEAD(hmac_algs); | ||
1241 | 1359 | ||
1242 | static int algs_registered; | 1360 | static int algs_registered; |
1243 | 1361 | ||
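With each template carrying the initial state, empty-message digest, hardware opcode and sizes, registration becomes a plain loop over the table, and hmac_type doubles as a capability flag: sha224 sets AUTH_TYPE_RESERVED because the hardware has no HMAC-SHA224 opcode, which suppresses the derived hmac() registration. A runnable sketch of that gate; the numeric hmac_type values here are illustrative, only the RESERVED sentinel matters:

	#include <stdio.h>

	#define AUTH_TYPE_RESERVED 0	/* illustrative value */

	struct tmpl {
		const char *name;
		int hmac_type;
	};

	static const struct tmpl tmpls[] = {
		{ "md5",    1 },			/* AUTH_TYPE_HMAC_MD5 */
		{ "sha1",   2 },			/* AUTH_TYPE_HMAC_SHA1 */
		{ "sha256", 3 },			/* AUTH_TYPE_HMAC_SHA256 */
		{ "sha224", AUTH_TYPE_RESERVED },	/* no HW HMAC opcode */
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(tmpls) / sizeof(tmpls[0]); i++) {
			printf("register %s\n", tmpls[i].name);
			if (tmpls[i].hmac_type != AUTH_TYPE_RESERVED)
				printf("register hmac(%s)\n", tmpls[i].name);
		}
		return 0;
	}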
@@ -1245,12 +1363,18 @@ static void __n2_unregister_algs(void) | |||
1245 | { | 1363 | { |
1246 | struct n2_cipher_alg *cipher, *cipher_tmp; | 1364 | struct n2_cipher_alg *cipher, *cipher_tmp; |
1247 | struct n2_ahash_alg *alg, *alg_tmp; | 1365 | struct n2_ahash_alg *alg, *alg_tmp; |
1366 | struct n2_hmac_alg *hmac, *hmac_tmp; | ||
1248 | 1367 | ||
1249 | list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { | 1368 | list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { |
1250 | crypto_unregister_alg(&cipher->alg); | 1369 | crypto_unregister_alg(&cipher->alg); |
1251 | list_del(&cipher->entry); | 1370 | list_del(&cipher->entry); |
1252 | kfree(cipher); | 1371 | kfree(cipher); |
1253 | } | 1372 | } |
1373 | list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { | ||
1374 | crypto_unregister_ahash(&hmac->derived.alg); | ||
1375 | list_del(&hmac->derived.entry); | ||
1376 | kfree(hmac); | ||
1377 | } | ||
1254 | list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { | 1378 | list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { |
1255 | crypto_unregister_ahash(&alg->alg); | 1379 | crypto_unregister_ahash(&alg->alg); |
1256 | list_del(&alg->entry); | 1380 | list_del(&alg->entry); |
@@ -1290,8 +1414,49 @@ static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) | |||
1290 | list_add(&p->entry, &cipher_algs); | 1414 | list_add(&p->entry, &cipher_algs); |
1291 | err = crypto_register_alg(alg); | 1415 | err = crypto_register_alg(alg); |
1292 | if (err) { | 1416 | if (err) { |
1417 | pr_err("%s alg registration failed\n", alg->cra_name); | ||
1293 | list_del(&p->entry); | 1418 | list_del(&p->entry); |
1294 | kfree(p); | 1419 | kfree(p); |
1420 | } else { | ||
1421 | pr_info("%s alg registered\n", alg->cra_name); | ||
1422 | } | ||
1423 | return err; | ||
1424 | } | ||
1425 | |||
1426 | static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) | ||
1427 | { | ||
1428 | struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
1429 | struct ahash_alg *ahash; | ||
1430 | struct crypto_alg *base; | ||
1431 | int err; | ||
1432 | |||
1433 | if (!p) | ||
1434 | return -ENOMEM; | ||
1435 | |||
1436 | p->child_alg = n2ahash->alg.halg.base.cra_name; | ||
1437 | memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); | ||
1438 | INIT_LIST_HEAD(&p->derived.entry); | ||
1439 | |||
1440 | ahash = &p->derived.alg; | ||
1441 | ahash->digest = n2_hmac_async_digest; | ||
1442 | ahash->setkey = n2_hmac_async_setkey; | ||
1443 | |||
1444 | base = &ahash->halg.base; | ||
1445 | snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); | ||
1446 | snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); | ||
1447 | |||
1448 | base->cra_ctxsize = sizeof(struct n2_hmac_ctx); | ||
1449 | base->cra_init = n2_hmac_cra_init; | ||
1450 | base->cra_exit = n2_hmac_cra_exit; | ||
1451 | |||
1452 | list_add(&p->derived.entry, &hmac_algs); | ||
1453 | err = crypto_register_ahash(ahash); | ||
1454 | if (err) { | ||
1455 | pr_err("%s alg registration failed\n", base->cra_name); | ||
1456 | list_del(&p->derived.entry); | ||
1457 | kfree(p); | ||
1458 | } else { | ||
1459 | pr_info("%s alg registered\n", base->cra_name); | ||
1295 | } | 1460 | } |
1296 | return err; | 1461 | return err; |
1297 | } | 1462 | } |
@@ -1307,12 +1472,19 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) | |||
1307 | if (!p) | 1472 | if (!p) |
1308 | return -ENOMEM; | 1473 | return -ENOMEM; |
1309 | 1474 | ||
1475 | p->hash_zero = tmpl->hash_zero; | ||
1476 | p->hash_init = tmpl->hash_init; | ||
1477 | p->auth_type = tmpl->auth_type; | ||
1478 | p->hmac_type = tmpl->hmac_type; | ||
1479 | p->hw_op_hashsz = tmpl->hw_op_hashsz; | ||
1480 | p->digest_size = tmpl->digest_size; | ||
1481 | |||
1310 | ahash = &p->alg; | 1482 | ahash = &p->alg; |
1311 | ahash->init = n2_hash_async_init; | 1483 | ahash->init = n2_hash_async_init; |
1312 | ahash->update = n2_hash_async_update; | 1484 | ahash->update = n2_hash_async_update; |
1313 | ahash->final = n2_hash_async_final; | 1485 | ahash->final = n2_hash_async_final; |
1314 | ahash->finup = n2_hash_async_finup; | 1486 | ahash->finup = n2_hash_async_finup; |
1315 | ahash->digest = tmpl->digest; | 1487 | ahash->digest = n2_hash_async_digest; |
1316 | 1488 | ||
1317 | halg = &ahash->halg; | 1489 | halg = &ahash->halg; |
1318 | halg->digestsize = tmpl->digest_size; | 1490 | halg->digestsize = tmpl->digest_size; |
@@ -1331,9 +1503,14 @@ static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) | |||
1331 | list_add(&p->entry, &ahash_algs); | 1503 | list_add(&p->entry, &ahash_algs); |
1332 | err = crypto_register_ahash(ahash); | 1504 | err = crypto_register_ahash(ahash); |
1333 | if (err) { | 1505 | if (err) { |
1506 | pr_err("%s alg registration failed\n", base->cra_name); | ||
1334 | list_del(&p->entry); | 1507 | list_del(&p->entry); |
1335 | kfree(p); | 1508 | kfree(p); |
1509 | } else { | ||
1510 | pr_info("%s alg registered\n", base->cra_name); | ||
1336 | } | 1511 | } |
1512 | if (!err && p->hmac_type != AUTH_TYPE_RESERVED) | ||
1513 | err = __n2_register_one_hmac(p); | ||
1337 | return err; | 1514 | return err; |
1338 | } | 1515 | } |
1339 | 1516 | ||
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 8b034337793f..7d1485676886 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #define pr_fmt(fmt) "%s: " fmt, __func__ | 16 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
17 | 17 | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/err.h> | 18 | #include <linux/err.h> |
20 | #include <linux/device.h> | 19 | #include <linux/device.h> |
21 | #include <linux/module.h> | 20 | #include <linux/module.h> |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index bd78acf3c365..97f4af1d8a64 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -720,7 +720,6 @@ struct talitos_ctx { | |||
720 | #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 | 720 | #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 |
721 | 721 | ||
722 | struct talitos_ahash_req_ctx { | 722 | struct talitos_ahash_req_ctx { |
723 | u64 count; | ||
724 | u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; | 723 | u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; |
725 | unsigned int hw_context_size; | 724 | unsigned int hw_context_size; |
726 | u8 buf[HASH_MAX_BLOCK_SIZE]; | 725 | u8 buf[HASH_MAX_BLOCK_SIZE]; |
@@ -729,6 +728,7 @@ struct talitos_ahash_req_ctx { | |||
729 | unsigned int first; | 728 | unsigned int first; |
730 | unsigned int last; | 729 | unsigned int last; |
731 | unsigned int to_hash_later; | 730 | unsigned int to_hash_later; |
731 | u64 nbuf; | ||
732 | struct scatterlist bufsl[2]; | 732 | struct scatterlist bufsl[2]; |
733 | struct scatterlist *psrc; | 733 | struct scatterlist *psrc; |
734 | }; | 734 | }; |
@@ -1613,6 +1613,7 @@ static void ahash_done(struct device *dev, | |||
1613 | if (!req_ctx->last && req_ctx->to_hash_later) { | 1613 | if (!req_ctx->last && req_ctx->to_hash_later) { |
1614 | /* Position any partial block for next update/final/finup */ | 1614 | /* Position any partial block for next update/final/finup */ |
1615 | memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); | 1615 | memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); |
1616 | req_ctx->nbuf = req_ctx->to_hash_later; | ||
1616 | } | 1617 | } |
1617 | common_nonsnoop_hash_unmap(dev, edesc, areq); | 1618 | common_nonsnoop_hash_unmap(dev, edesc, areq); |
1618 | 1619 | ||
@@ -1728,7 +1729,7 @@ static int ahash_init(struct ahash_request *areq) | |||
1728 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | 1729 | struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); |
1729 | 1730 | ||
1730 | /* Initialize the context */ | 1731 | /* Initialize the context */ |
1731 | req_ctx->count = 0; | 1732 | req_ctx->nbuf = 0; |
1732 | req_ctx->first = 1; /* first indicates h/w must init its context */ | 1733 | req_ctx->first = 1; /* first indicates h/w must init its context */ |
1733 | req_ctx->swinit = 0; /* assume h/w init of context */ | 1734 | req_ctx->swinit = 0; /* assume h/w init of context */ |
1734 | req_ctx->hw_context_size = | 1735 | req_ctx->hw_context_size = |
@@ -1776,52 +1777,54 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
1776 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | 1777 | crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); |
1777 | unsigned int nbytes_to_hash; | 1778 | unsigned int nbytes_to_hash; |
1778 | unsigned int to_hash_later; | 1779 | unsigned int to_hash_later; |
1779 | unsigned int index; | 1780 | unsigned int nsg; |
1780 | int chained; | 1781 | int chained; |
1781 | 1782 | ||
1782 | index = req_ctx->count & (blocksize - 1); | 1783 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { |
1783 | req_ctx->count += nbytes; | 1784 | /* Buffer up to one whole block */ |
1784 | |||
1785 | if (!req_ctx->last && (index + nbytes) < blocksize) { | ||
1786 | /* Buffer the partial block */ | ||
1787 | sg_copy_to_buffer(areq->src, | 1785 | sg_copy_to_buffer(areq->src, |
1788 | sg_count(areq->src, nbytes, &chained), | 1786 | sg_count(areq->src, nbytes, &chained), |
1789 | req_ctx->buf + index, nbytes); | 1787 | req_ctx->buf + req_ctx->nbuf, nbytes); |
1788 | req_ctx->nbuf += nbytes; | ||
1790 | return 0; | 1789 | return 0; |
1791 | } | 1790 | } |
1792 | 1791 | ||
1793 | if (index) { | 1792 | /* At least (blocksize + 1) bytes are available to hash */ |
1794 | /* partial block from previous update; chain it in. */ | 1793 | nbytes_to_hash = nbytes + req_ctx->nbuf; |
1795 | sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1); | 1794 | to_hash_later = nbytes_to_hash & (blocksize - 1); |
1796 | sg_set_buf(req_ctx->bufsl, req_ctx->buf, index); | 1795 | |
1797 | if (nbytes) | 1796 | if (req_ctx->last) |
1798 | scatterwalk_sg_chain(req_ctx->bufsl, 2, | 1797 | to_hash_later = 0; |
1799 | areq->src); | 1798 | else if (to_hash_later) |
1799 | /* There is a partial block. Hash the full block(s) now */ | ||
1800 | nbytes_to_hash -= to_hash_later; | ||
1801 | else { | ||
1802 | /* Keep one block buffered */ | ||
1803 | nbytes_to_hash -= blocksize; | ||
1804 | to_hash_later = blocksize; | ||
1805 | } | ||
1806 | |||
1807 | /* Chain in any previously buffered data */ | ||
1808 | if (req_ctx->nbuf) { | ||
1809 | nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; | ||
1810 | sg_init_table(req_ctx->bufsl, nsg); | ||
1811 | sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); | ||
1812 | if (nsg > 1) | ||
1813 | scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src); | ||
1800 | req_ctx->psrc = req_ctx->bufsl; | 1814 | req_ctx->psrc = req_ctx->bufsl; |
1801 | } else { | 1815 | } else |
1802 | req_ctx->psrc = areq->src; | 1816 | req_ctx->psrc = areq->src; |
1817 | |||
1818 | if (to_hash_later) { | ||
1819 | int nents = sg_count(areq->src, nbytes, &chained); | ||
1820 | sg_copy_end_to_buffer(areq->src, nents, | ||
1821 | req_ctx->bufnext, | ||
1822 | to_hash_later, | ||
1823 | nbytes - to_hash_later); | ||
1803 | } | 1824 | } |
1804 | nbytes_to_hash = index + nbytes; | 1825 | req_ctx->to_hash_later = to_hash_later; |
1805 | if (!req_ctx->last) { | ||
1806 | to_hash_later = (nbytes_to_hash & (blocksize - 1)); | ||
1807 | if (to_hash_later) { | ||
1808 | int nents; | ||
1809 | /* Must copy to_hash_later bytes from the end | ||
1810 | * to bufnext (a partial block) for later. | ||
1811 | */ | ||
1812 | nents = sg_count(areq->src, nbytes, &chained); | ||
1813 | sg_copy_end_to_buffer(areq->src, nents, | ||
1814 | req_ctx->bufnext, | ||
1815 | to_hash_later, | ||
1816 | nbytes - to_hash_later); | ||
1817 | |||
1818 | /* Adjust count for what will be hashed now */ | ||
1819 | nbytes_to_hash -= to_hash_later; | ||
1820 | } | ||
1821 | req_ctx->to_hash_later = to_hash_later; | ||
1822 | } | ||
1823 | 1826 | ||
1824 | /* allocate extended descriptor */ | 1827 | /* Allocate extended descriptor */ |
1825 | edesc = ahash_edesc_alloc(areq, nbytes_to_hash); | 1828 | edesc = ahash_edesc_alloc(areq, nbytes_to_hash); |
1826 | if (IS_ERR(edesc)) | 1829 | if (IS_ERR(edesc)) |
1827 | return PTR_ERR(edesc); | 1830 | return PTR_ERR(edesc); |
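The reworked talitos path tracks nbuf, the bytes currently buffered, instead of a running byte count. Non-final updates buffer up to one whole block, and when the accumulated data is exactly block-aligned a full block is still held back, so a later final/finup always has data to hash. A runnable sketch of the split computation (BLOCKSIZE is an assumed example value):

	#include <stdio.h>

	#define BLOCKSIZE 64

	/* Decide how much to hash now vs. buffer for later. */
	static void split(unsigned int nbytes, unsigned int nbuf, int last,
			  unsigned int *to_hash, unsigned int *later)
	{
		unsigned int total = nbytes + nbuf;
		unsigned int rem = total & (BLOCKSIZE - 1);

		if (last)
			rem = 0;		/* final: hash everything */
		else if (!rem)
			rem = BLOCKSIZE;	/* aligned: hold one block back */

		*to_hash = total - rem;
		*later = rem;
	}

	int main(void)
	{
		unsigned int h, l;

		split(128, 0, 0, &h, &l);	/* block-aligned update */
		printf("hash %u now, buffer %u\n", h, l);  /* 64 and 64 */
		split(100, 28, 1, &h, &l);	/* final */
		printf("hash %u now, buffer %u\n", h, l);  /* 128 and 0 */
		return 0;
	}

(The case nbytes + nbuf <= blocksize never reaches this split: the hunk shows it is buffered whole and the function returns early.)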
diff --git a/include/linux/padata.h b/include/linux/padata.h index 8d8406246eef..bdcd1e9eacea 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h | |||
@@ -25,6 +25,11 @@ | |||
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/notifier.h> | ||
29 | #include <linux/kobject.h> | ||
30 | |||
31 | #define PADATA_CPU_SERIAL 0x01 | ||
32 | #define PADATA_CPU_PARALLEL 0x02 | ||
28 | 33 | ||
29 | /** | 34 | /** |
30 | * struct padata_priv - Embedded to the users data structure. | 35 | * struct padata_priv - Embedded to the users data structure. |
@@ -59,7 +64,20 @@ struct padata_list { | |||
59 | }; | 64 | }; |
60 | 65 | ||
61 | /** | 66 | /** |
62 | * struct padata_queue - The percpu padata queues. | 67 | * struct padata_serial_queue - The percpu padata serial queue |
68 | * | ||
69 | * @serial: List to wait for serialization after reordering. | ||
70 | * @work: work struct for serialization. | ||
71 | * @pd: Backpointer to the internal control structure. | ||
72 | */ | ||
73 | struct padata_serial_queue { | ||
74 | struct padata_list serial; | ||
75 | struct work_struct work; | ||
76 | struct parallel_data *pd; | ||
77 | }; | ||
78 | |||
79 | /** | ||
80 | * struct padata_parallel_queue - The percpu padata parallel queue | ||
63 | * | 81 | * |
64 | * @parallel: List to wait for parallelization. | 82 | * @parallel: List to wait for parallelization. |
65 | * @reorder: List to wait for reordering after parallel processing. | 83 | * @reorder: List to wait for reordering after parallel processing. |
@@ -67,18 +85,28 @@ struct padata_list { | |||
67 | * @pwork: work struct for parallelization. | 85 | * @pwork: work struct for parallelization. |
68 | * @swork: work struct for serialization. | 86 | * @swork: work struct for serialization. |
69 | * @pd: Backpointer to the internal control structure. | 87 | * @pd: Backpointer to the internal control structure. |
88 | * @work: work struct for parallelization. | ||
70 | * @num_obj: Number of objects that are processed by this cpu. | 89 | * @num_obj: Number of objects that are processed by this cpu. |
71 | * @cpu_index: Index of the cpu. | 90 | * @cpu_index: Index of the cpu. |
72 | */ | 91 | */ |
73 | struct padata_queue { | 92 | struct padata_parallel_queue { |
74 | struct padata_list parallel; | 93 | struct padata_list parallel; |
75 | struct padata_list reorder; | 94 | struct padata_list reorder; |
76 | struct padata_list serial; | 95 | struct parallel_data *pd; |
77 | struct work_struct pwork; | 96 | struct work_struct work; |
78 | struct work_struct swork; | 97 | atomic_t num_obj; |
79 | struct parallel_data *pd; | 98 | int cpu_index; |
80 | atomic_t num_obj; | 99 | }; |
81 | int cpu_index; | 100 | |
101 | /** | ||
102 | * struct padata_cpumask - The cpumasks for the parallel/serial workers | ||
103 | * | ||
104 | * @pcpu: cpumask for the parallel workers. | ||
105 | * @cbcpu: cpumask for the serial (callback) workers. | ||
106 | */ | ||
107 | struct padata_cpumask { | ||
108 | cpumask_var_t pcpu; | ||
109 | cpumask_var_t cbcpu; | ||
82 | }; | 110 | }; |
83 | 111 | ||
84 | /** | 112 | /** |
@@ -86,25 +114,29 @@ struct padata_queue { | |||
86 | * that depends on the cpumask in use. | 114 | * that depends on the cpumask in use. |
87 | * | 115 | * |
88 | * @pinst: padata instance. | 116 | * @pinst: padata instance. |
89 | * @queue: percpu padata queues. | 117 | * @pqueue: percpu padata queues used for parallelization. |
118 | * @squeue: percpu padata queues used for serialization. | ||
90 | * @seq_nr: The sequence number that will be attached to the next object. | 119 | * @seq_nr: The sequence number that will be attached to the next object. |
91 | * @reorder_objects: Number of objects waiting in the reorder queues. | 120 | * @reorder_objects: Number of objects waiting in the reorder queues. |
92 | * @refcnt: Number of objects holding a reference on this parallel_data. | 121 | * @refcnt: Number of objects holding a reference on this parallel_data. |
93 | * @max_seq_nr: Maximal used sequence number. | 122 | * @max_seq_nr: Maximal used sequence number. |
94 | * @cpumask: cpumask in use. | 123 | * @cpumask: The cpumasks in use for parallel and serial workers. |
95 | * @lock: Reorder lock. | 124 | * @lock: Reorder lock. |
125 | * @processed: Number of already processed objects. | ||
96 | * @timer: Reorder timer. | 126 | * @timer: Reorder timer. |
97 | */ | 127 | */ |
98 | struct parallel_data { | 128 | struct parallel_data { |
99 | struct padata_instance *pinst; | 129 | struct padata_instance *pinst; |
100 | struct padata_queue *queue; | 130 | struct padata_parallel_queue *pqueue; |
101 | atomic_t seq_nr; | 131 | struct padata_serial_queue *squeue; |
102 | atomic_t reorder_objects; | 132 | atomic_t seq_nr; |
103 | atomic_t refcnt; | 133 | atomic_t reorder_objects; |
104 | unsigned int max_seq_nr; | 134 | atomic_t refcnt; |
105 | cpumask_var_t cpumask; | 135 | unsigned int max_seq_nr; |
106 | spinlock_t lock; | 136 | struct padata_cpumask cpumask; |
107 | struct timer_list timer; | 137 | spinlock_t lock ____cacheline_aligned; |
138 | unsigned int processed; | ||
139 | struct timer_list timer; | ||
108 | }; | 140 | }; |
109 | 141 | ||
110 | /** | 142 | /** |
@@ -113,31 +145,48 @@ struct parallel_data { | |||
113 | * @cpu_notifier: cpu hotplug notifier. | 145 | * @cpu_notifier: cpu hotplug notifier. |
114 | * @wq: The workqueue in use. | 146 | * @wq: The workqueue in use. |
115 | * @pd: The internal control structure. | 147 | * @pd: The internal control structure. |
116 | * @cpumask: User supplied cpumask. | 148 | * @cpumask: User supplied cpumasks for parallel and serial work. |
149 | * @cpumask_change_notifier: Notifiers chain for user-defined notify | ||
150 | * callbacks that will be called when either @pcpu or @cbcpu | ||
151 | * or both cpumasks change. | ||
152 | * @kobj: padata instance kernel object. | ||
117 | * @lock: padata instance lock. | 153 | * @lock: padata instance lock. |
118 | * @flags: padata flags. | 154 | * @flags: padata flags. |
119 | */ | 155 | */ |
120 | struct padata_instance { | 156 | struct padata_instance { |
121 | struct notifier_block cpu_notifier; | 157 | struct notifier_block cpu_notifier; |
122 | struct workqueue_struct *wq; | 158 | struct workqueue_struct *wq; |
123 | struct parallel_data *pd; | 159 | struct parallel_data *pd; |
124 | cpumask_var_t cpumask; | 160 | struct padata_cpumask cpumask; |
125 | struct mutex lock; | 161 | struct blocking_notifier_head cpumask_change_notifier; |
126 | u8 flags; | 162 | struct kobject kobj; |
127 | #define PADATA_INIT 1 | 163 | struct mutex lock; |
128 | #define PADATA_RESET 2 | 164 | u8 flags; |
165 | #define PADATA_INIT 1 | ||
166 | #define PADATA_RESET 2 | ||
167 | #define PADATA_INVALID 4 | ||
129 | }; | 168 | }; |
130 | 169 | ||
131 | extern struct padata_instance *padata_alloc(const struct cpumask *cpumask, | 170 | extern struct padata_instance *padata_alloc_possible( |
132 | struct workqueue_struct *wq); | 171 | struct workqueue_struct *wq); |
172 | extern struct padata_instance *padata_alloc(struct workqueue_struct *wq, | ||
173 | const struct cpumask *pcpumask, | ||
174 | const struct cpumask *cbcpumask); | ||
133 | extern void padata_free(struct padata_instance *pinst); | 175 | extern void padata_free(struct padata_instance *pinst); |
134 | extern int padata_do_parallel(struct padata_instance *pinst, | 176 | extern int padata_do_parallel(struct padata_instance *pinst, |
135 | struct padata_priv *padata, int cb_cpu); | 177 | struct padata_priv *padata, int cb_cpu); |
136 | extern void padata_do_serial(struct padata_priv *padata); | 178 | extern void padata_do_serial(struct padata_priv *padata); |
137 | extern int padata_set_cpumask(struct padata_instance *pinst, | 179 | extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, |
138 | cpumask_var_t cpumask); | 180 | cpumask_var_t cpumask); |
139 | extern int padata_add_cpu(struct padata_instance *pinst, int cpu); | 181 | extern int padata_set_cpumasks(struct padata_instance *pinst, |
140 | extern int padata_remove_cpu(struct padata_instance *pinst, int cpu); | 182 | cpumask_var_t pcpumask, |
141 | extern void padata_start(struct padata_instance *pinst); | 183 | cpumask_var_t cbcpumask); |
184 | extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); | ||
185 | extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); | ||
186 | extern int padata_start(struct padata_instance *pinst); | ||
142 | extern void padata_stop(struct padata_instance *pinst); | 187 | extern void padata_stop(struct padata_instance *pinst); |
188 | extern int padata_register_cpumask_notifier(struct padata_instance *pinst, | ||
189 | struct notifier_block *nblock); | ||
190 | extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, | ||
191 | struct notifier_block *nblock); | ||
143 | #endif | 192 | #endif |
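As the header now reads, allocation takes the workqueue first plus separate parallel and callback cpumasks, padata_start reports errors, and padata_do_parallel returns -EINVAL for a cb_cpu outside cpumask.cbcpu. A minimal sketch of client initialization against this API, assuming a module of our own; names such as my_padata are illustrative, not from the tree:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/padata.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *wq;
	static struct padata_instance *pinst;

	static int __init my_padata_init(void)
	{
		int err;

		wq = create_workqueue("my_padata");
		if (!wq)
			return -ENOMEM;

		/* Both cpumasks default to cpu_possible_mask. */
		pinst = padata_alloc_possible(wq);
		if (!pinst) {
			destroy_workqueue(wq);
			return -ENOMEM;
		}

		err = padata_start(pinst);	/* now returns an error code */
		if (err) {
			padata_free(pinst);
			destroy_workqueue(wq);
		}
		return err;
	}
	module_init(my_padata_init);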
diff --git a/kernel/padata.c b/kernel/padata.c index fdd8ae609ce3..751019415d23 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
@@ -26,18 +26,19 @@ | |||
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/sysfs.h> | ||
29 | #include <linux/rcupdate.h> | 30 | #include <linux/rcupdate.h> |
30 | 31 | ||
31 | #define MAX_SEQ_NR INT_MAX - NR_CPUS | 32 | #define MAX_SEQ_NR (INT_MAX - NR_CPUS) |
32 | #define MAX_OBJ_NUM 1000 | 33 | #define MAX_OBJ_NUM 1000 |
33 | 34 | ||
34 | static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) | 35 | static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) |
35 | { | 36 | { |
36 | int cpu, target_cpu; | 37 | int cpu, target_cpu; |
37 | 38 | ||
38 | target_cpu = cpumask_first(pd->cpumask); | 39 | target_cpu = cpumask_first(pd->cpumask.pcpu); |
39 | for (cpu = 0; cpu < cpu_index; cpu++) | 40 | for (cpu = 0; cpu < cpu_index; cpu++) |
40 | target_cpu = cpumask_next(target_cpu, pd->cpumask); | 41 | target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); |
41 | 42 | ||
42 | return target_cpu; | 43 | return target_cpu; |
43 | } | 44 | } |
@@ -53,26 +54,27 @@ static int padata_cpu_hash(struct padata_priv *padata) | |||
53 | * Hash the sequence numbers to the cpus by taking | 54 | * Hash the sequence numbers to the cpus by taking |
54 | * seq_nr modulo the number of cpus in use. | 55 | * seq_nr modulo the number of cpus in use. |
55 | */ | 56 | */ |
56 | cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask); | 57 | cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); |
57 | 58 | ||
58 | return padata_index_to_cpu(pd, cpu_index); | 59 | return padata_index_to_cpu(pd, cpu_index); |
59 | } | 60 | } |
60 | 61 | ||
61 | static void padata_parallel_worker(struct work_struct *work) | 62 | static void padata_parallel_worker(struct work_struct *parallel_work) |
62 | { | 63 | { |
63 | struct padata_queue *queue; | 64 | struct padata_parallel_queue *pqueue; |
64 | struct parallel_data *pd; | 65 | struct parallel_data *pd; |
65 | struct padata_instance *pinst; | 66 | struct padata_instance *pinst; |
66 | LIST_HEAD(local_list); | 67 | LIST_HEAD(local_list); |
67 | 68 | ||
68 | local_bh_disable(); | 69 | local_bh_disable(); |
69 | queue = container_of(work, struct padata_queue, pwork); | 70 | pqueue = container_of(parallel_work, |
70 | pd = queue->pd; | 71 | struct padata_parallel_queue, work); |
72 | pd = pqueue->pd; | ||
71 | pinst = pd->pinst; | 73 | pinst = pd->pinst; |
72 | 74 | ||
73 | spin_lock(&queue->parallel.lock); | 75 | spin_lock(&pqueue->parallel.lock); |
74 | list_replace_init(&queue->parallel.list, &local_list); | 76 | list_replace_init(&pqueue->parallel.list, &local_list); |
75 | spin_unlock(&queue->parallel.lock); | 77 | spin_unlock(&pqueue->parallel.lock); |
76 | 78 | ||
77 | while (!list_empty(&local_list)) { | 79 | while (!list_empty(&local_list)) { |
78 | struct padata_priv *padata; | 80 | struct padata_priv *padata; |
@@ -94,7 +96,7 @@ static void padata_parallel_worker(struct work_struct *work) | |||
94 | * @pinst: padata instance | 96 | * @pinst: padata instance |
95 | * @padata: object to be parallelized | 97 | * @padata: object to be parallelized |
96 | * @cb_cpu: cpu the serialization callback function will run on, | 98 | * @cb_cpu: cpu the serialization callback function will run on, |
97 | * must be in the cpumask of padata. | 99 | * must be in the serial cpumask of padata (i.e. cpumask.cbcpu). |
98 | * | 100 | * |
99 | * The parallelization callback function will run with BHs off. | 101 | * The parallelization callback function will run with BHs off. |
100 | * Note: Every object which is parallelized by padata_do_parallel | 102 | * Note: Every object which is parallelized by padata_do_parallel |
@@ -104,15 +106,18 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
104 | struct padata_priv *padata, int cb_cpu) | 106 | struct padata_priv *padata, int cb_cpu) |
105 | { | 107 | { |
106 | int target_cpu, err; | 108 | int target_cpu, err; |
107 | struct padata_queue *queue; | 109 | struct padata_parallel_queue *queue; |
108 | struct parallel_data *pd; | 110 | struct parallel_data *pd; |
109 | 111 | ||
110 | rcu_read_lock_bh(); | 112 | rcu_read_lock_bh(); |
111 | 113 | ||
112 | pd = rcu_dereference(pinst->pd); | 114 | pd = rcu_dereference(pinst->pd); |
113 | 115 | ||
114 | err = 0; | 116 | err = -EINVAL; |
115 | if (!(pinst->flags & PADATA_INIT)) | 117 | if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) |
118 | goto out; | ||
119 | |||
120 | if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) | ||
116 | goto out; | 121 | goto out; |
117 | 122 | ||
118 | err = -EBUSY; | 123 | err = -EBUSY; |
@@ -122,11 +127,7 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
122 | if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) | 127 | if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) |
123 | goto out; | 128 | goto out; |
124 | 129 | ||
125 | err = -EINVAL; | 130 | err = 0; |
126 | if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) | ||
127 | goto out; | ||
128 | |||
129 | err = -EINPROGRESS; | ||
130 | atomic_inc(&pd->refcnt); | 131 | atomic_inc(&pd->refcnt); |
131 | padata->pd = pd; | 132 | padata->pd = pd; |
132 | padata->cb_cpu = cb_cpu; | 133 | padata->cb_cpu = cb_cpu; |
@@ -137,13 +138,13 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
137 | padata->seq_nr = atomic_inc_return(&pd->seq_nr); | 138 | padata->seq_nr = atomic_inc_return(&pd->seq_nr); |
138 | 139 | ||
139 | target_cpu = padata_cpu_hash(padata); | 140 | target_cpu = padata_cpu_hash(padata); |
140 | queue = per_cpu_ptr(pd->queue, target_cpu); | 141 | queue = per_cpu_ptr(pd->pqueue, target_cpu); |
141 | 142 | ||
142 | spin_lock(&queue->parallel.lock); | 143 | spin_lock(&queue->parallel.lock); |
143 | list_add_tail(&padata->list, &queue->parallel.list); | 144 | list_add_tail(&padata->list, &queue->parallel.list); |
144 | spin_unlock(&queue->parallel.lock); | 145 | spin_unlock(&queue->parallel.lock); |
145 | 146 | ||
146 | queue_work_on(target_cpu, pinst->wq, &queue->pwork); | 147 | queue_work_on(target_cpu, pinst->wq, &queue->work); |
147 | 148 | ||
148 | out: | 149 | out: |
149 | rcu_read_unlock_bh(); | 150 | rcu_read_unlock_bh(); |
@@ -171,84 +172,52 @@ EXPORT_SYMBOL(padata_do_parallel); | |||
171 | */ | 172 | */ |
172 | static struct padata_priv *padata_get_next(struct parallel_data *pd) | 173 | static struct padata_priv *padata_get_next(struct parallel_data *pd) |
173 | { | 174 | { |
174 | int cpu, num_cpus, empty, calc_seq_nr; | 175 | int cpu, num_cpus; |
175 | int seq_nr, next_nr, overrun, next_overrun; | 176 | int next_nr, next_index; |
176 | struct padata_queue *queue, *next_queue; | 177 | struct padata_parallel_queue *queue, *next_queue; |
177 | struct padata_priv *padata; | 178 | struct padata_priv *padata; |
178 | struct padata_list *reorder; | 179 | struct padata_list *reorder; |
179 | 180 | ||
180 | empty = 0; | 181 | num_cpus = cpumask_weight(pd->cpumask.pcpu); |
181 | next_nr = -1; | ||
182 | next_overrun = 0; | ||
183 | next_queue = NULL; | ||
184 | |||
185 | num_cpus = cpumask_weight(pd->cpumask); | ||
186 | |||
187 | for_each_cpu(cpu, pd->cpumask) { | ||
188 | queue = per_cpu_ptr(pd->queue, cpu); | ||
189 | reorder = &queue->reorder; | ||
190 | |||
191 | /* | ||
192 | * Calculate the seq_nr of the object that should be | ||
193 | * next in this reorder queue. | ||
194 | */ | ||
195 | overrun = 0; | ||
196 | calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus) | ||
197 | + queue->cpu_index; | ||
198 | 182 | ||
199 | if (unlikely(calc_seq_nr > pd->max_seq_nr)) { | 183 | /* |
200 | calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1; | 184 | * Calculate the percpu reorder queue and the sequence |
201 | overrun = 1; | 185 | * number of the next object. |
202 | } | 186 | */ |
203 | 187 | next_nr = pd->processed; | |
204 | if (!list_empty(&reorder->list)) { | 188 | next_index = next_nr % num_cpus; |
205 | padata = list_entry(reorder->list.next, | 189 | cpu = padata_index_to_cpu(pd, next_index); |
206 | struct padata_priv, list); | 190 | next_queue = per_cpu_ptr(pd->pqueue, cpu); |
207 | 191 | ||
208 | seq_nr = padata->seq_nr; | 192 | if (unlikely(next_nr > pd->max_seq_nr)) { |
209 | BUG_ON(calc_seq_nr != seq_nr); | 193 | next_nr = next_nr - pd->max_seq_nr - 1; |
210 | } else { | 194 | next_index = next_nr % num_cpus; |
211 | seq_nr = calc_seq_nr; | 195 | cpu = padata_index_to_cpu(pd, next_index); |
212 | empty++; | 196 | next_queue = per_cpu_ptr(pd->pqueue, cpu); |
213 | } | 197 | pd->processed = 0; |
214 | |||
215 | if (next_nr < 0 || seq_nr < next_nr | ||
216 | || (next_overrun && !overrun)) { | ||
217 | next_nr = seq_nr; | ||
218 | next_overrun = overrun; | ||
219 | next_queue = queue; | ||
220 | } | ||
221 | } | 198 | } |
222 | 199 | ||
223 | padata = NULL; | 200 | padata = NULL; |
224 | 201 | ||
225 | if (empty == num_cpus) | ||
226 | goto out; | ||
227 | |||
228 | reorder = &next_queue->reorder; | 202 | reorder = &next_queue->reorder; |
229 | 203 | ||
230 | if (!list_empty(&reorder->list)) { | 204 | if (!list_empty(&reorder->list)) { |
231 | padata = list_entry(reorder->list.next, | 205 | padata = list_entry(reorder->list.next, |
232 | struct padata_priv, list); | 206 | struct padata_priv, list); |
233 | 207 | ||
234 | if (unlikely(next_overrun)) { | 208 | BUG_ON(next_nr != padata->seq_nr); |
235 | for_each_cpu(cpu, pd->cpumask) { | ||
236 | queue = per_cpu_ptr(pd->queue, cpu); | ||
237 | atomic_set(&queue->num_obj, 0); | ||
238 | } | ||
239 | } | ||
240 | 209 | ||
241 | spin_lock(&reorder->lock); | 210 | spin_lock(&reorder->lock); |
242 | list_del_init(&padata->list); | 211 | list_del_init(&padata->list); |
243 | atomic_dec(&pd->reorder_objects); | 212 | atomic_dec(&pd->reorder_objects); |
244 | spin_unlock(&reorder->lock); | 213 | spin_unlock(&reorder->lock); |
245 | 214 | ||
246 | atomic_inc(&next_queue->num_obj); | 215 | pd->processed++; |
247 | 216 | ||
248 | goto out; | 217 | goto out; |
249 | } | 218 | } |
250 | 219 | ||
251 | queue = per_cpu_ptr(pd->queue, smp_processor_id()); | 220 | queue = per_cpu_ptr(pd->pqueue, smp_processor_id()); |
252 | if (queue->cpu_index == next_queue->cpu_index) { | 221 | if (queue->cpu_index == next_queue->cpu_index) { |
253 | padata = ERR_PTR(-ENODATA); | 222 | padata = ERR_PTR(-ENODATA); |
254 | goto out; | 223 | goto out; |
@@ -262,7 +231,7 @@ out: | |||
262 | static void padata_reorder(struct parallel_data *pd) | 231 | static void padata_reorder(struct parallel_data *pd) |
263 | { | 232 | { |
264 | struct padata_priv *padata; | 233 | struct padata_priv *padata; |
265 | struct padata_queue *queue; | 234 | struct padata_serial_queue *squeue; |
266 | struct padata_instance *pinst = pd->pinst; | 235 | struct padata_instance *pinst = pd->pinst; |
267 | 236 | ||
268 | /* | 237 | /* |
@@ -301,13 +270,13 @@ static void padata_reorder(struct parallel_data *pd) | |||
301 | return; | 270 | return; |
302 | } | 271 | } |
303 | 272 | ||
304 | queue = per_cpu_ptr(pd->queue, padata->cb_cpu); | 273 | squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); |
305 | 274 | ||
306 | spin_lock(&queue->serial.lock); | 275 | spin_lock(&squeue->serial.lock); |
307 | list_add_tail(&padata->list, &queue->serial.list); | 276 | list_add_tail(&padata->list, &squeue->serial.list); |
308 | spin_unlock(&queue->serial.lock); | 277 | spin_unlock(&squeue->serial.lock); |
309 | 278 | ||
310 | queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork); | 279 | queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); |
311 | } | 280 | } |
312 | 281 | ||
313 | spin_unlock_bh(&pd->lock); | 282 | spin_unlock_bh(&pd->lock); |
@@ -333,19 +302,19 @@ static void padata_reorder_timer(unsigned long arg) | |||
333 | padata_reorder(pd); | 302 | padata_reorder(pd); |
334 | } | 303 | } |
335 | 304 | ||
336 | static void padata_serial_worker(struct work_struct *work) | 305 | static void padata_serial_worker(struct work_struct *serial_work) |
337 | { | 306 | { |
338 | struct padata_queue *queue; | 307 | struct padata_serial_queue *squeue; |
339 | struct parallel_data *pd; | 308 | struct parallel_data *pd; |
340 | LIST_HEAD(local_list); | 309 | LIST_HEAD(local_list); |
341 | 310 | ||
342 | local_bh_disable(); | 311 | local_bh_disable(); |
343 | queue = container_of(work, struct padata_queue, swork); | 312 | squeue = container_of(serial_work, struct padata_serial_queue, work); |
344 | pd = queue->pd; | 313 | pd = squeue->pd; |
345 | 314 | ||
346 | spin_lock(&queue->serial.lock); | 315 | spin_lock(&squeue->serial.lock); |
347 | list_replace_init(&queue->serial.list, &local_list); | 316 | list_replace_init(&squeue->serial.list, &local_list); |
348 | spin_unlock(&queue->serial.lock); | 317 | spin_unlock(&squeue->serial.lock); |
349 | 318 | ||
350 | while (!list_empty(&local_list)) { | 319 | while (!list_empty(&local_list)) { |
351 | struct padata_priv *padata; | 320 | struct padata_priv *padata; |
@@ -372,18 +341,18 @@ static void padata_serial_worker(struct work_struct *work) | |||
372 | void padata_do_serial(struct padata_priv *padata) | 341 | void padata_do_serial(struct padata_priv *padata) |
373 | { | 342 | { |
374 | int cpu; | 343 | int cpu; |
375 | struct padata_queue *queue; | 344 | struct padata_parallel_queue *pqueue; |
376 | struct parallel_data *pd; | 345 | struct parallel_data *pd; |
377 | 346 | ||
378 | pd = padata->pd; | 347 | pd = padata->pd; |
379 | 348 | ||
380 | cpu = get_cpu(); | 349 | cpu = get_cpu(); |
381 | queue = per_cpu_ptr(pd->queue, cpu); | 350 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
382 | 351 | ||
383 | spin_lock(&queue->reorder.lock); | 352 | spin_lock(&pqueue->reorder.lock); |
384 | atomic_inc(&pd->reorder_objects); | 353 | atomic_inc(&pd->reorder_objects); |
385 | list_add_tail(&padata->list, &queue->reorder.list); | 354 | list_add_tail(&padata->list, &pqueue->reorder.list); |
386 | spin_unlock(&queue->reorder.lock); | 355 | spin_unlock(&pqueue->reorder.lock); |
387 | 356 | ||
388 | put_cpu(); | 357 | put_cpu(); |
389 | 358 | ||
@@ -391,52 +360,89 @@ void padata_do_serial(struct padata_priv *padata) | |||
391 | } | 360 | } |
392 | EXPORT_SYMBOL(padata_do_serial); | 361 | EXPORT_SYMBOL(padata_do_serial); |
393 | 362 | ||
394 | /* Allocate and initialize the internal cpumask dependent resources. */ | 363 | static int padata_setup_cpumasks(struct parallel_data *pd, |
395 | static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | 364 | const struct cpumask *pcpumask, |
396 | const struct cpumask *cpumask) | 365 | const struct cpumask *cbcpumask) |
397 | { | 366 | { |
398 | int cpu, cpu_index, num_cpus; | 367 | if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) |
399 | struct padata_queue *queue; | 368 | return -ENOMEM; |
400 | struct parallel_data *pd; | ||
401 | |||
402 | cpu_index = 0; | ||
403 | 369 | ||
404 | pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); | 370 | cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask); |
405 | if (!pd) | 371 | if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { |
406 | goto err; | 372 | free_cpumask_var(pd->cpumask.pcpu); |
373 | return -ENOMEM; | ||
374 | } | ||
407 | 375 | ||
408 | pd->queue = alloc_percpu(struct padata_queue); | 376 | cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask); |
409 | if (!pd->queue) | 377 | return 0; |
410 | goto err_free_pd; | 378 | } |
411 | 379 | ||
412 | if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL)) | 380 | static void __padata_list_init(struct padata_list *pd_list) |
413 | goto err_free_queue; | 381 | { |
382 | INIT_LIST_HEAD(&pd_list->list); | ||
383 | spin_lock_init(&pd_list->lock); | ||
384 | } | ||
414 | 385 | ||
415 | cpumask_and(pd->cpumask, cpumask, cpu_active_mask); | 386 | /* Initialize all percpu queues used by serial workers */ |
387 | static void padata_init_squeues(struct parallel_data *pd) | ||
388 | { | ||
389 | int cpu; | ||
390 | struct padata_serial_queue *squeue; | ||
416 | 391 | ||
417 | for_each_cpu(cpu, pd->cpumask) { | 392 | for_each_cpu(cpu, pd->cpumask.cbcpu) { |
418 | queue = per_cpu_ptr(pd->queue, cpu); | 393 | squeue = per_cpu_ptr(pd->squeue, cpu); |
394 | squeue->pd = pd; | ||
395 | __padata_list_init(&squeue->serial); | ||
396 | INIT_WORK(&squeue->work, padata_serial_worker); | ||
397 | } | ||
398 | } | ||
419 | 399 | ||
420 | queue->pd = pd; | 400 | /* Initialize all percpu queues used by parallel workers */ |
401 | static void padata_init_pqueues(struct parallel_data *pd) | ||
402 | { | ||
403 | int cpu_index, num_cpus, cpu; | ||
404 | struct padata_parallel_queue *pqueue; | ||
421 | 405 | ||
422 | queue->cpu_index = cpu_index; | 406 | cpu_index = 0; |
407 | for_each_cpu(cpu, pd->cpumask.pcpu) { | ||
408 | pqueue = per_cpu_ptr(pd->pqueue, cpu); | ||
409 | pqueue->pd = pd; | ||
410 | pqueue->cpu_index = cpu_index; | ||
423 | cpu_index++; | 411 | cpu_index++; |
424 | 412 | ||
425 | INIT_LIST_HEAD(&queue->reorder.list); | 413 | __padata_list_init(&pqueue->reorder); |
426 | INIT_LIST_HEAD(&queue->parallel.list); | 414 | __padata_list_init(&pqueue->parallel); |
427 | INIT_LIST_HEAD(&queue->serial.list); | 415 | INIT_WORK(&pqueue->work, padata_parallel_worker); |
428 | spin_lock_init(&queue->reorder.lock); | 416 | atomic_set(&pqueue->num_obj, 0); |
429 | spin_lock_init(&queue->parallel.lock); | ||
430 | spin_lock_init(&queue->serial.lock); | ||
431 | |||
432 | INIT_WORK(&queue->pwork, padata_parallel_worker); | ||
433 | INIT_WORK(&queue->swork, padata_serial_worker); | ||
434 | atomic_set(&queue->num_obj, 0); | ||
435 | } | 417 | } |
436 | 418 | ||
437 | num_cpus = cpumask_weight(pd->cpumask); | 419 | num_cpus = cpumask_weight(pd->cpumask.pcpu); |
438 | pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; | 420 | pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0; |
421 | } | ||
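The rounding of max_seq_nr above places the sequence number wrap point at a whole multiple of the parallel cpu count, so the seq_nr based round-robin mapping onto reorder queues stays consistent across a wrap. A toy illustration with hypothetical numbers:

    /* with a cap of 1000 and num_cpus = 3:
     * max_seq_nr = (1000 / 3) * 3 - 1 = 998
     * seq_nr then cycles through 0..998, i.e. 999 values,
     * exactly 333 complete rounds over the 3 parallel cpus.
     */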
422 | |||
423 | /* Allocate and initialize the internal cpumask dependent resources. */ | ||
424 | static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | ||
425 | const struct cpumask *pcpumask, | ||
426 | const struct cpumask *cbcpumask) | ||
427 | { | ||
428 | struct parallel_data *pd; | ||
439 | 429 | ||
430 | pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); | ||
431 | if (!pd) | ||
432 | goto err; | ||
433 | |||
434 | pd->pqueue = alloc_percpu(struct padata_parallel_queue); | ||
435 | if (!pd->pqueue) | ||
436 | goto err_free_pd; | ||
437 | |||
438 | pd->squeue = alloc_percpu(struct padata_serial_queue); | ||
439 | if (!pd->squeue) | ||
440 | goto err_free_pqueue; | ||
441 | if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) | ||
442 | goto err_free_squeue; | ||
443 | |||
444 | padata_init_pqueues(pd); | ||
445 | padata_init_squeues(pd); | ||
440 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); | 446 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); |
441 | atomic_set(&pd->seq_nr, -1); | 447 | atomic_set(&pd->seq_nr, -1); |
442 | atomic_set(&pd->reorder_objects, 0); | 448 | atomic_set(&pd->reorder_objects, 0); |
@@ -446,8 +452,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | |||
446 | 452 | ||
447 | return pd; | 453 | return pd; |
448 | 454 | ||
449 | err_free_queue: | 455 | err_free_squeue: |
450 | free_percpu(pd->queue); | 456 | free_percpu(pd->squeue); |
457 | err_free_pqueue: | ||
458 | free_percpu(pd->pqueue); | ||
451 | err_free_pd: | 459 | err_free_pd: |
452 | kfree(pd); | 460 | kfree(pd); |
453 | err: | 461 | err: |
@@ -456,8 +464,10 @@ err: | |||
456 | 464 | ||
457 | static void padata_free_pd(struct parallel_data *pd) | 465 | static void padata_free_pd(struct parallel_data *pd) |
458 | { | 466 | { |
459 | free_cpumask_var(pd->cpumask); | 467 | free_cpumask_var(pd->cpumask.pcpu); |
460 | free_percpu(pd->queue); | 468 | free_cpumask_var(pd->cpumask.cbcpu); |
469 | free_percpu(pd->pqueue); | ||
470 | free_percpu(pd->squeue); | ||
461 | kfree(pd); | 471 | kfree(pd); |
462 | } | 472 | } |
463 | 473 | ||
@@ -465,11 +475,12 @@ static void padata_free_pd(struct parallel_data *pd) | |||
465 | static void padata_flush_queues(struct parallel_data *pd) | 475 | static void padata_flush_queues(struct parallel_data *pd) |
466 | { | 476 | { |
467 | int cpu; | 477 | int cpu; |
468 | struct padata_queue *queue; | 478 | struct padata_parallel_queue *pqueue; |
479 | struct padata_serial_queue *squeue; | ||
469 | 480 | ||
470 | for_each_cpu(cpu, pd->cpumask) { | 481 | for_each_cpu(cpu, pd->cpumask.pcpu) { |
471 | queue = per_cpu_ptr(pd->queue, cpu); | 482 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
472 | flush_work(&queue->pwork); | 483 | flush_work(&pqueue->work); |
473 | } | 484 | } |
474 | 485 | ||
475 | del_timer_sync(&pd->timer); | 486 | del_timer_sync(&pd->timer); |
@@ -477,19 +488,39 @@ static void padata_flush_queues(struct parallel_data *pd) | |||
477 | if (atomic_read(&pd->reorder_objects)) | 488 | if (atomic_read(&pd->reorder_objects)) |
478 | padata_reorder(pd); | 489 | padata_reorder(pd); |
479 | 490 | ||
480 | for_each_cpu(cpu, pd->cpumask) { | 491 | for_each_cpu(cpu, pd->cpumask.cbcpu) { |
481 | queue = per_cpu_ptr(pd->queue, cpu); | 492 | squeue = per_cpu_ptr(pd->squeue, cpu); |
482 | flush_work(&queue->swork); | 493 | flush_work(&squeue->work); |
483 | } | 494 | } |
484 | 495 | ||
485 | BUG_ON(atomic_read(&pd->refcnt) != 0); | 496 | BUG_ON(atomic_read(&pd->refcnt) != 0); |
486 | } | 497 | } |
487 | 498 | ||
499 | static void __padata_start(struct padata_instance *pinst) | ||
500 | { | ||
501 | pinst->flags |= PADATA_INIT; | ||
502 | } | ||
503 | |||
504 | static void __padata_stop(struct padata_instance *pinst) | ||
505 | { | ||
506 | if (!(pinst->flags & PADATA_INIT)) | ||
507 | return; | ||
508 | |||
509 | pinst->flags &= ~PADATA_INIT; | ||
510 | |||
511 | synchronize_rcu(); | ||
512 | |||
513 | get_online_cpus(); | ||
514 | padata_flush_queues(pinst->pd); | ||
515 | put_online_cpus(); | ||
516 | } | ||
517 | |||
488 | /* Replace the internal control structure with a new one. */ | 518 | /* Replace the internal control structure with a new one. */ |
489 | static void padata_replace(struct padata_instance *pinst, | 519 | static void padata_replace(struct padata_instance *pinst, |
490 | struct parallel_data *pd_new) | 520 | struct parallel_data *pd_new) |
491 | { | 521 | { |
492 | struct parallel_data *pd_old = pinst->pd; | 522 | struct parallel_data *pd_old = pinst->pd; |
523 | int notification_mask = 0; | ||
493 | 524 | ||
494 | pinst->flags |= PADATA_RESET; | 525 | pinst->flags |= PADATA_RESET; |
495 | 526 | ||
@@ -497,41 +528,162 @@ static void padata_replace(struct padata_instance *pinst, | |||
497 | 528 | ||
498 | synchronize_rcu(); | 529 | synchronize_rcu(); |
499 | 530 | ||
531 | if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) | ||
532 | notification_mask |= PADATA_CPU_PARALLEL; | ||
533 | if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) | ||
534 | notification_mask |= PADATA_CPU_SERIAL; | ||
535 | |||
500 | padata_flush_queues(pd_old); | 536 | padata_flush_queues(pd_old); |
501 | padata_free_pd(pd_old); | 537 | padata_free_pd(pd_old); |
502 | 538 | ||
539 | if (notification_mask) | ||
540 | blocking_notifier_call_chain(&pinst->cpumask_change_notifier, | ||
541 | notification_mask, | ||
542 | &pd_new->cpumask); | ||
543 | |||
503 | pinst->flags &= ~PADATA_RESET; | 544 | pinst->flags &= ~PADATA_RESET; |
504 | } | 545 | } |
505 | 546 | ||
506 | /** | 547 | /** |
507 | * padata_set_cpumask - set the cpumask that padata should use | 548 | * padata_register_cpumask_notifier - Registers a notifier that will be called |
549 | * if either the pcpu or cbcpu cpumask, or both, change. | ||
508 | * | 550 | * |
509 | * @pinst: padata instance | 551 | * @pinst: A pointer to a padata instance |
510 | * @cpumask: the cpumask to use | 552 | * @nblock: A pointer to a notifier block. |
511 | */ | 553 | */ |
512 | int padata_set_cpumask(struct padata_instance *pinst, | 554 | int padata_register_cpumask_notifier(struct padata_instance *pinst, |
513 | cpumask_var_t cpumask) | 555 | struct notifier_block *nblock) |
514 | { | 556 | { |
557 | return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, | ||
558 | nblock); | ||
559 | } | ||
560 | EXPORT_SYMBOL(padata_register_cpumask_notifier); | ||
561 | |||
562 | /** | ||
563 | * padata_unregister_cpumask_notifier - Unregisters cpumask notifier | ||
564 | * registered earlier using padata_register_cpumask_notifier | ||
565 | * | ||
566 | * @pinst: A pointer to data instance. | ||
567 | * @nblock: A pointer to a notifier block. | ||
568 | */ | ||
569 | int padata_unregister_cpumask_notifier(struct padata_instance *pinst, | ||
570 | struct notifier_block *nblock) | ||
571 | { | ||
572 | return blocking_notifier_chain_unregister( | ||
573 | &pinst->cpumask_change_notifier, | ||
574 | nblock); | ||
575 | } | ||
576 | EXPORT_SYMBOL(padata_unregister_cpumask_notifier); | ||
577 | |||
578 | |||
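A cpumask notifier receives the new masks as a struct padata_cpumask pointer plus a bitmask saying which of the two changed. A minimal sketch of a callback and its registration (names hypothetical):

    static int my_cpumask_notify(struct notifier_block *self,
                                 unsigned long val, void *data)
    {
            struct padata_cpumask *new_masks = data;

            if (val & PADATA_CPU_PARALLEL)
                    pr_info("padata: %d usable parallel cpus\n",
                            cpumask_weight(new_masks->pcpu));
            if (val & PADATA_CPU_SERIAL)
                    pr_info("padata: %d usable serial cpus\n",
                            cpumask_weight(new_masks->cbcpu));
            return 0;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_cpumask_notify,
    };

    /* after allocating pinst: */
    padata_register_cpumask_notifier(pinst, &my_nb);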
579 | /* If the cpumask contains no active cpu, we mark the instance as invalid. */ | ||
580 | static bool padata_validate_cpumask(struct padata_instance *pinst, | ||
581 | const struct cpumask *cpumask) | ||
582 | { | ||
583 | if (!cpumask_intersects(cpumask, cpu_active_mask)) { | ||
584 | pinst->flags |= PADATA_INVALID; | ||
585 | return false; | ||
586 | } | ||
587 | |||
588 | pinst->flags &= ~PADATA_INVALID; | ||
589 | return true; | ||
590 | } | ||
591 | |||
592 | static int __padata_set_cpumasks(struct padata_instance *pinst, | ||
593 | cpumask_var_t pcpumask, | ||
594 | cpumask_var_t cbcpumask) | ||
595 | { | ||
596 | int valid; | ||
515 | struct parallel_data *pd; | 597 | struct parallel_data *pd; |
516 | int err = 0; | 598 | |
599 | valid = padata_validate_cpumask(pinst, pcpumask); | ||
600 | if (!valid) { | ||
601 | __padata_stop(pinst); | ||
602 | goto out_replace; | ||
603 | } | ||
604 | |||
605 | valid = padata_validate_cpumask(pinst, cbcpumask); | ||
606 | if (!valid) | ||
607 | __padata_stop(pinst); | ||
608 | |||
609 | out_replace: | ||
610 | pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); | ||
611 | if (!pd) | ||
612 | return -ENOMEM; | ||
613 | |||
614 | cpumask_copy(pinst->cpumask.pcpu, pcpumask); | ||
615 | cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); | ||
616 | |||
617 | padata_replace(pinst, pd); | ||
618 | |||
619 | if (valid) | ||
620 | __padata_start(pinst); | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | /** | ||
626 | * padata_set_cpumasks - Set both parallel and serial cpumasks. The first | ||
627 | * one is used by parallel workers and the second one | ||
628 | * by the workers doing serialization. | ||
629 | * | ||
630 | * @pinst: padata instance | ||
631 | * @pcpumask: the cpumask to use for parallel workers | ||
632 | * @cbcpumask: the cpumask to use for serial workers | ||
633 | */ | ||
634 | int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, | ||
635 | cpumask_var_t cbcpumask) | ||
636 | { | ||
637 | int err; | ||
517 | 638 | ||
518 | mutex_lock(&pinst->lock); | 639 | mutex_lock(&pinst->lock); |
640 | get_online_cpus(); | ||
519 | 641 | ||
642 | err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); | ||
643 | |||
644 | put_online_cpus(); | ||
645 | mutex_unlock(&pinst->lock); | ||
646 | |||
647 | return err; | ||
648 | |||
649 | } | ||
650 | EXPORT_SYMBOL(padata_set_cpumasks); | ||
651 | |||
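Both masks are copied internally, so a caller may free its temporaries right after the call. A sketch that hands all online cpus to the parallel side and pins serialization to cpu 0 (assuming cpu 0 is active):

    cpumask_var_t pmask, cbmask;
    int err;

    if (!alloc_cpumask_var(&pmask, GFP_KERNEL))
            return -ENOMEM;
    if (!alloc_cpumask_var(&cbmask, GFP_KERNEL)) {
            free_cpumask_var(pmask);
            return -ENOMEM;
    }
    cpumask_copy(pmask, cpu_online_mask);
    cpumask_clear(cbmask);
    cpumask_set_cpu(0, cbmask);

    err = padata_set_cpumasks(pinst, pmask, cbmask);

    free_cpumask_var(pmask);
    free_cpumask_var(cbmask);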
652 | /** | ||
653 | * padata_set_cpumask: Sets the cpumask specified by @cpumask_type to the | ||
654 | * value of @cpumask. | ||
655 | * | ||
656 | * @pinst: padata instance | ||
657 | * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, corresponding | ||
658 | * to the serial and parallel cpumasks respectively. | ||
659 | * @cpumask: the cpumask to use | ||
660 | */ | ||
661 | int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, | ||
662 | cpumask_var_t cpumask) | ||
663 | { | ||
664 | struct cpumask *serial_mask, *parallel_mask; | ||
665 | int err = -EINVAL; | ||
666 | |||
667 | mutex_lock(&pinst->lock); | ||
520 | get_online_cpus(); | 668 | get_online_cpus(); |
521 | 669 | ||
522 | pd = padata_alloc_pd(pinst, cpumask); | 670 | switch (cpumask_type) { |
523 | if (!pd) { | 671 | case PADATA_CPU_PARALLEL: |
524 | err = -ENOMEM; | 672 | serial_mask = pinst->cpumask.cbcpu; |
525 | goto out; | 673 | parallel_mask = cpumask; |
674 | break; | ||
675 | case PADATA_CPU_SERIAL: | ||
676 | parallel_mask = pinst->cpumask.pcpu; | ||
677 | serial_mask = cpumask; | ||
678 | break; | ||
679 | default: | ||
680 | goto out; | ||
526 | } | 681 | } |
527 | 682 | ||
528 | cpumask_copy(pinst->cpumask, cpumask); | 683 | err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); |
529 | |||
530 | padata_replace(pinst, pd); | ||
531 | 684 | ||
532 | out: | 685 | out: |
533 | put_online_cpus(); | 686 | put_online_cpus(); |
534 | |||
535 | mutex_unlock(&pinst->lock); | 687 | mutex_unlock(&pinst->lock); |
536 | 688 | ||
537 | return err; | 689 | return err; |
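So updating just one of the two masks, for instance steering the serial callbacks to a single cpu, might look like this sketch (cpu number hypothetical):

    cpumask_var_t mask;
    int err;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;
    cpumask_clear(mask);
    cpumask_set_cpu(1, mask);

    err = padata_set_cpumask(pinst, PADATA_CPU_SERIAL, mask);
    free_cpumask_var(mask);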
@@ -543,30 +695,48 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu) | |||
543 | struct parallel_data *pd; | 695 | struct parallel_data *pd; |
544 | 696 | ||
545 | if (cpumask_test_cpu(cpu, cpu_active_mask)) { | 697 | if (cpumask_test_cpu(cpu, cpu_active_mask)) { |
546 | pd = padata_alloc_pd(pinst, pinst->cpumask); | 698 | pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, |
699 | pinst->cpumask.cbcpu); | ||
547 | if (!pd) | 700 | if (!pd) |
548 | return -ENOMEM; | 701 | return -ENOMEM; |
549 | 702 | ||
550 | padata_replace(pinst, pd); | 703 | padata_replace(pinst, pd); |
704 | |||
705 | if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && | ||
706 | padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) | ||
707 | __padata_start(pinst); | ||
551 | } | 708 | } |
552 | 709 | ||
553 | return 0; | 710 | return 0; |
554 | } | 711 | } |
555 | 712 | ||
556 | /** | 713 | /** |
557 | * padata_add_cpu - add a cpu to the padata cpumask | 714 | * padata_add_cpu - add a cpu to one or both (parallel and serial) |
715 | * padata cpumasks. | ||
558 | * | 716 | * |
559 | * @pinst: padata instance | 717 | * @pinst: padata instance |
560 | * @cpu: cpu to add | 718 | * @cpu: cpu to add |
719 | * @mask: bitmask of flags specifying to which cpumask @cpu should be added. | ||
720 | * The @mask may be any combination of the following flags: | ||
721 | * PADATA_CPU_SERIAL - serial cpumask | ||
722 | * PADATA_CPU_PARALLEL - parallel cpumask | ||
561 | */ | 723 | */ |
562 | int padata_add_cpu(struct padata_instance *pinst, int cpu) | 724 | |
725 | int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) | ||
563 | { | 726 | { |
564 | int err; | 727 | int err; |
565 | 728 | ||
729 | if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) | ||
730 | return -EINVAL; | ||
731 | |||
566 | mutex_lock(&pinst->lock); | 732 | mutex_lock(&pinst->lock); |
567 | 733 | ||
568 | get_online_cpus(); | 734 | get_online_cpus(); |
569 | cpumask_set_cpu(cpu, pinst->cpumask); | 735 | if (mask & PADATA_CPU_SERIAL) |
736 | cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); | ||
737 | if (mask & PADATA_CPU_PARALLEL) | ||
738 | cpumask_set_cpu(cpu, pinst->cpumask.pcpu); | ||
739 | |||
570 | err = __padata_add_cpu(pinst, cpu); | 740 | err = __padata_add_cpu(pinst, cpu); |
571 | put_online_cpus(); | 741 | put_online_cpus(); |
572 | 742 | ||
@@ -578,10 +748,16 @@ EXPORT_SYMBOL(padata_add_cpu); | |||
578 | 748 | ||
579 | static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) | 749 | static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) |
580 | { | 750 | { |
581 | struct parallel_data *pd; | 751 | struct parallel_data *pd = NULL; |
582 | 752 | ||
583 | if (cpumask_test_cpu(cpu, cpu_online_mask)) { | 753 | if (cpumask_test_cpu(cpu, cpu_online_mask)) { |
584 | pd = padata_alloc_pd(pinst, pinst->cpumask); | 754 | |
755 | if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || | ||
756 | !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) | ||
757 | __padata_stop(pinst); | ||
758 | |||
759 | pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, | ||
760 | pinst->cpumask.cbcpu); | ||
585 | if (!pd) | 761 | if (!pd) |
586 | return -ENOMEM; | 762 | return -ENOMEM; |
587 | 763 | ||
@@ -591,20 +767,32 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) | |||
591 | return 0; | 767 | return 0; |
592 | } | 768 | } |
593 | 769 | ||
594 | /** | 770 | /** |
595 | * padata_remove_cpu - remove a cpu from the padata cpumask | 771 | * padata_remove_cpu - remove a cpu from one or both (serial and parallel) |
772 | * padata cpumasks. | ||
596 | * | 773 | * |
597 | * @pinst: padata instance | 774 | * @pinst: padata instance |
598 | * @cpu: cpu to remove | 775 | * @cpu: cpu to remove |
776 | * @mask: bitmask specifying from which cpumask @cpu should be removed | ||
777 | * The @mask may be any combination of the following flags: | ||
778 | * PADATA_CPU_SERIAL - serial cpumask | ||
779 | * PADATA_CPU_PARALLEL - parallel cpumask | ||
599 | */ | 780 | */ |
600 | int padata_remove_cpu(struct padata_instance *pinst, int cpu) | 781 | int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) |
601 | { | 782 | { |
602 | int err; | 783 | int err; |
603 | 784 | ||
785 | if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) | ||
786 | return -EINVAL; | ||
787 | |||
604 | mutex_lock(&pinst->lock); | 788 | mutex_lock(&pinst->lock); |
605 | 789 | ||
606 | get_online_cpus(); | 790 | get_online_cpus(); |
607 | cpumask_clear_cpu(cpu, pinst->cpumask); | 791 | if (mask & PADATA_CPU_SERIAL) |
792 | cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); | ||
793 | if (mask & PADATA_CPU_PARALLEL) | ||
794 | cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); | ||
795 | |||
608 | err = __padata_remove_cpu(pinst, cpu); | 796 | err = __padata_remove_cpu(pinst, cpu); |
609 | put_online_cpus(); | 797 | put_online_cpus(); |
610 | 798 | ||
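Together with padata_add_cpu() above, manually cycling a cpu through an instance might look like this sketch (cpu number hypothetical):

    int err;

    /* make cpu 2 available to both the parallel and the serial side */
    err = padata_add_cpu(pinst, 2, PADATA_CPU_PARALLEL | PADATA_CPU_SERIAL);
    if (err)
            return err;

    /* later: stop using cpu 2 for serialization only */
    err = padata_remove_cpu(pinst, 2, PADATA_CPU_SERIAL);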
@@ -619,11 +807,20 @@ EXPORT_SYMBOL(padata_remove_cpu); | |||
619 | * | 807 | * |
620 | * @pinst: padata instance to start | 808 | * @pinst: padata instance to start |
621 | */ | 809 | */ |
622 | void padata_start(struct padata_instance *pinst) | 810 | int padata_start(struct padata_instance *pinst) |
623 | { | 811 | { |
812 | int err = 0; | ||
813 | |||
624 | mutex_lock(&pinst->lock); | 814 | mutex_lock(&pinst->lock); |
625 | pinst->flags |= PADATA_INIT; | 815 | |
816 | if (pinst->flags & PADATA_INVALID) | ||
817 | err = -EINVAL; | ||
818 | |||
819 | __padata_start(pinst); | ||
820 | |||
626 | mutex_unlock(&pinst->lock); | 821 | mutex_unlock(&pinst->lock); |
822 | |||
823 | return err; | ||
627 | } | 824 | } |
628 | EXPORT_SYMBOL(padata_start); | 825 | EXPORT_SYMBOL(padata_start); |
629 | 826 | ||
@@ -635,12 +832,20 @@ EXPORT_SYMBOL(padata_start); | |||
635 | void padata_stop(struct padata_instance *pinst) | 832 | void padata_stop(struct padata_instance *pinst) |
636 | { | 833 | { |
637 | mutex_lock(&pinst->lock); | 834 | mutex_lock(&pinst->lock); |
638 | pinst->flags &= ~PADATA_INIT; | 835 | __padata_stop(pinst); |
639 | mutex_unlock(&pinst->lock); | 836 | mutex_unlock(&pinst->lock); |
640 | } | 837 | } |
641 | EXPORT_SYMBOL(padata_stop); | 838 | EXPORT_SYMBOL(padata_stop); |
642 | 839 | ||
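Since padata_start() can now fail when the instance has been marked PADATA_INVALID, callers should check the return value; a sketch:

    int err;

    err = padata_start(pinst);
    if (err) {
            /* no active cpu was left in one of the supplied masks */
            padata_free(pinst);
            return err;
    }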
643 | #ifdef CONFIG_HOTPLUG_CPU | 840 | #ifdef CONFIG_HOTPLUG_CPU |
841 | |||
842 | static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) | ||
843 | { | ||
844 | return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || | ||
845 | cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); | ||
846 | } | ||
847 | |||
848 | |||
644 | static int padata_cpu_callback(struct notifier_block *nfb, | 849 | static int padata_cpu_callback(struct notifier_block *nfb, |
645 | unsigned long action, void *hcpu) | 850 | unsigned long action, void *hcpu) |
646 | { | 851 | { |
@@ -653,7 +858,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
653 | switch (action) { | 858 | switch (action) { |
654 | case CPU_ONLINE: | 859 | case CPU_ONLINE: |
655 | case CPU_ONLINE_FROZEN: | 860 | case CPU_ONLINE_FROZEN: |
656 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 861 | if (!pinst_has_cpu(pinst, cpu)) |
657 | break; | 862 | break; |
658 | mutex_lock(&pinst->lock); | 863 | mutex_lock(&pinst->lock); |
659 | err = __padata_add_cpu(pinst, cpu); | 864 | err = __padata_add_cpu(pinst, cpu); |
@@ -664,7 +869,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
664 | 869 | ||
665 | case CPU_DOWN_PREPARE: | 870 | case CPU_DOWN_PREPARE: |
666 | case CPU_DOWN_PREPARE_FROZEN: | 871 | case CPU_DOWN_PREPARE_FROZEN: |
667 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 872 | if (!pinst_has_cpu(pinst, cpu)) |
668 | break; | 873 | break; |
669 | mutex_lock(&pinst->lock); | 874 | mutex_lock(&pinst->lock); |
670 | err = __padata_remove_cpu(pinst, cpu); | 875 | err = __padata_remove_cpu(pinst, cpu); |
@@ -675,7 +880,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
675 | 880 | ||
676 | case CPU_UP_CANCELED: | 881 | case CPU_UP_CANCELED: |
677 | case CPU_UP_CANCELED_FROZEN: | 882 | case CPU_UP_CANCELED_FROZEN: |
678 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 883 | if (!pinst_has_cpu(pinst, cpu)) |
679 | break; | 884 | break; |
680 | mutex_lock(&pinst->lock); | 885 | mutex_lock(&pinst->lock); |
681 | __padata_remove_cpu(pinst, cpu); | 886 | __padata_remove_cpu(pinst, cpu); |
@@ -683,7 +888,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
683 | 888 | ||
684 | case CPU_DOWN_FAILED: | 889 | case CPU_DOWN_FAILED: |
685 | case CPU_DOWN_FAILED_FROZEN: | 890 | case CPU_DOWN_FAILED_FROZEN: |
686 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 891 | if (!pinst_has_cpu(pinst, cpu)) |
687 | break; | 892 | break; |
688 | mutex_lock(&pinst->lock); | 893 | mutex_lock(&pinst->lock); |
689 | __padata_add_cpu(pinst, cpu); | 894 | __padata_add_cpu(pinst, cpu); |
@@ -694,36 +899,202 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
694 | } | 899 | } |
695 | #endif | 900 | #endif |
696 | 901 | ||
902 | static void __padata_free(struct padata_instance *pinst) | ||
903 | { | ||
904 | #ifdef CONFIG_HOTPLUG_CPU | ||
905 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | ||
906 | #endif | ||
907 | |||
908 | padata_stop(pinst); | ||
909 | padata_free_pd(pinst->pd); | ||
910 | free_cpumask_var(pinst->cpumask.pcpu); | ||
911 | free_cpumask_var(pinst->cpumask.cbcpu); | ||
912 | kfree(pinst); | ||
913 | } | ||
914 | |||
915 | #define kobj2pinst(_kobj) \ | ||
916 | container_of(_kobj, struct padata_instance, kobj) | ||
917 | #define attr2pentry(_attr) \ | ||
918 | container_of(_attr, struct padata_sysfs_entry, attr) | ||
919 | |||
920 | static void padata_sysfs_release(struct kobject *kobj) | ||
921 | { | ||
922 | struct padata_instance *pinst = kobj2pinst(kobj); | ||
923 | __padata_free(pinst); | ||
924 | } | ||
925 | |||
926 | struct padata_sysfs_entry { | ||
927 | struct attribute attr; | ||
928 | ssize_t (*show)(struct padata_instance *, struct attribute *, char *); | ||
929 | ssize_t (*store)(struct padata_instance *, struct attribute *, | ||
930 | const char *, size_t); | ||
931 | }; | ||
932 | |||
933 | static ssize_t show_cpumask(struct padata_instance *pinst, | ||
934 | struct attribute *attr, char *buf) | ||
935 | { | ||
936 | struct cpumask *cpumask; | ||
937 | ssize_t len; | ||
938 | |||
939 | mutex_lock(&pinst->lock); | ||
940 | if (!strcmp(attr->name, "serial_cpumask")) | ||
941 | cpumask = pinst->cpumask.cbcpu; | ||
942 | else | ||
943 | cpumask = pinst->cpumask.pcpu; | ||
944 | |||
945 | len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), | ||
946 | nr_cpu_ids); | ||
947 | if (PAGE_SIZE - len < 2) | ||
948 | len = -EINVAL; | ||
949 | else | ||
950 | len += sprintf(buf + len, "\n"); | ||
951 | |||
952 | mutex_unlock(&pinst->lock); | ||
953 | return len; | ||
954 | } | ||
955 | |||
956 | static ssize_t store_cpumask(struct padata_instance *pinst, | ||
957 | struct attribute *attr, | ||
958 | const char *buf, size_t count) | ||
959 | { | ||
960 | cpumask_var_t new_cpumask; | ||
961 | ssize_t ret; | ||
962 | int mask_type; | ||
963 | |||
964 | if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) | ||
965 | return -ENOMEM; | ||
966 | |||
967 | ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), | ||
968 | nr_cpumask_bits); | ||
969 | if (ret < 0) | ||
970 | goto out; | ||
971 | |||
972 | mask_type = !strcmp(attr->name, "serial_cpumask") ? | ||
973 | PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; | ||
974 | ret = padata_set_cpumask(pinst, mask_type, new_cpumask); | ||
975 | if (!ret) | ||
976 | ret = count; | ||
977 | |||
978 | out: | ||
979 | free_cpumask_var(new_cpumask); | ||
980 | return ret; | ||
981 | } | ||
982 | |||
983 | #define PADATA_ATTR_RW(_name, _show_name, _store_name) \ | ||
984 | static struct padata_sysfs_entry _name##_attr = \ | ||
985 | __ATTR(_name, 0644, _show_name, _store_name) | ||
986 | #define PADATA_ATTR_RO(_name, _show_name) \ | ||
987 | static struct padata_sysfs_entry _name##_attr = \ | ||
988 | __ATTR(_name, 0400, _show_name, NULL) | ||
989 | |||
990 | PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); | ||
991 | PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); | ||
992 | |||
993 | /* | ||
994 | * Padata sysfs provides the following objects: | ||
995 | * serial_cpumask [RW] - cpumask for serial workers | ||
996 | * parallel_cpumask [RW] - cpumask for parallel workers | ||
997 | */ | ||
998 | static struct attribute *padata_default_attrs[] = { | ||
999 | &serial_cpumask_attr.attr, | ||
1000 | &parallel_cpumask_attr.attr, | ||
1001 | NULL, | ||
1002 | }; | ||
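kobject_init() in padata_alloc() below only initializes pinst->kobj; the two attribute files appear once a padata user adds the kobject under some parent, for example (sketch, parent and name hypothetical):

    err = kobject_add(&pinst->kobj, parent_kobj, "my_padata_instance");

After that, serial_cpumask and parallel_cpumask can be read and written from user space.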
1003 | |||
1004 | static ssize_t padata_sysfs_show(struct kobject *kobj, | ||
1005 | struct attribute *attr, char *buf) | ||
1006 | { | ||
1007 | struct padata_instance *pinst; | ||
1008 | struct padata_sysfs_entry *pentry; | ||
1009 | ssize_t ret = -EIO; | ||
1010 | |||
1011 | pinst = kobj2pinst(kobj); | ||
1012 | pentry = attr2pentry(attr); | ||
1013 | if (pentry->show) | ||
1014 | ret = pentry->show(pinst, attr, buf); | ||
1015 | |||
1016 | return ret; | ||
1017 | } | ||
1018 | |||
1019 | static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, | ||
1020 | const char *buf, size_t count) | ||
1021 | { | ||
1022 | struct padata_instance *pinst; | ||
1023 | struct padata_sysfs_entry *pentry; | ||
1024 | ssize_t ret = -EIO; | ||
1025 | |||
1026 | pinst = kobj2pinst(kobj); | ||
1027 | pentry = attr2pentry(attr); | ||
1028 | if (pentry->store) | ||
1029 | ret = pentry->store(pinst, attr, buf, count); | ||
1030 | |||
1031 | return ret; | ||
1032 | } | ||
1033 | |||
1034 | static const struct sysfs_ops padata_sysfs_ops = { | ||
1035 | .show = padata_sysfs_show, | ||
1036 | .store = padata_sysfs_store, | ||
1037 | }; | ||
1038 | |||
1039 | static struct kobj_type padata_attr_type = { | ||
1040 | .sysfs_ops = &padata_sysfs_ops, | ||
1041 | .default_attrs = padata_default_attrs, | ||
1042 | .release = padata_sysfs_release, | ||
1043 | }; | ||
1044 | |||
697 | /** | 1045 | /** |
698 | * padata_alloc - allocate and initialize a padata instance | 1046 | * padata_alloc_possible - Allocate and initialize a padata instance. |
1047 | * Uses the cpu_possible_mask for both serial and | ||
1048 | * parallel workers. | ||
699 | * | 1049 | * |
700 | * @cpumask: cpumask that padata uses for parallelization | ||
701 | * @wq: workqueue to use for the allocated padata instance | 1050 | * @wq: workqueue to use for the allocated padata instance |
702 | */ | 1051 | */ |
703 | struct padata_instance *padata_alloc(const struct cpumask *cpumask, | 1052 | struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) |
704 | struct workqueue_struct *wq) | 1053 | { |
1054 | return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); | ||
1055 | } | ||
1056 | EXPORT_SYMBOL(padata_alloc_possible); | ||
1057 | |||
1058 | /** | ||
1059 | * padata_alloc - allocate and initialize a padata instance and specify | ||
1060 | * cpumasks for serial and parallel workers. | ||
1061 | * | ||
1062 | * @wq: workqueue to use for the allocated padata instance | ||
1063 | * @pcpumask: cpumask that will be used for padata parallelization | ||
1064 | * @cbcpumask: cpumask that will be used for padata serialization | ||
1065 | */ | ||
1066 | struct padata_instance *padata_alloc(struct workqueue_struct *wq, | ||
1067 | const struct cpumask *pcpumask, | ||
1068 | const struct cpumask *cbcpumask) | ||
705 | { | 1069 | { |
706 | struct padata_instance *pinst; | 1070 | struct padata_instance *pinst; |
707 | struct parallel_data *pd; | 1071 | struct parallel_data *pd = NULL; |
708 | 1072 | ||
709 | pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); | 1073 | pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); |
710 | if (!pinst) | 1074 | if (!pinst) |
711 | goto err; | 1075 | goto err; |
712 | 1076 | ||
713 | get_online_cpus(); | 1077 | get_online_cpus(); |
714 | 1078 | if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) | |
715 | pd = padata_alloc_pd(pinst, cpumask); | ||
716 | if (!pd) | ||
717 | goto err_free_inst; | 1079 | goto err_free_inst; |
1080 | if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { | ||
1081 | free_cpumask_var(pinst->cpumask.pcpu); | ||
1082 | goto err_free_inst; | ||
1083 | } | ||
1084 | if (!padata_validate_cpumask(pinst, pcpumask) || | ||
1085 | !padata_validate_cpumask(pinst, cbcpumask)) | ||
1086 | goto err_free_masks; | ||
718 | 1087 | ||
719 | if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) | 1088 | pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); |
720 | goto err_free_pd; | 1089 | if (!pd) |
1090 | goto err_free_masks; | ||
721 | 1091 | ||
722 | rcu_assign_pointer(pinst->pd, pd); | 1092 | rcu_assign_pointer(pinst->pd, pd); |
723 | 1093 | ||
724 | pinst->wq = wq; | 1094 | pinst->wq = wq; |
725 | 1095 | ||
726 | cpumask_copy(pinst->cpumask, cpumask); | 1096 | cpumask_copy(pinst->cpumask.pcpu, pcpumask); |
1097 | cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); | ||
727 | 1098 | ||
728 | pinst->flags = 0; | 1099 | pinst->flags = 0; |
729 | 1100 | ||
@@ -735,12 +1106,15 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, | |||
735 | 1106 | ||
736 | put_online_cpus(); | 1107 | put_online_cpus(); |
737 | 1108 | ||
1109 | BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); | ||
1110 | kobject_init(&pinst->kobj, &padata_attr_type); | ||
738 | mutex_init(&pinst->lock); | 1111 | mutex_init(&pinst->lock); |
739 | 1112 | ||
740 | return pinst; | 1113 | return pinst; |
741 | 1114 | ||
742 | err_free_pd: | 1115 | err_free_masks: |
743 | padata_free_pd(pd); | 1116 | free_cpumask_var(pinst->cpumask.pcpu); |
1117 | free_cpumask_var(pinst->cpumask.cbcpu); | ||
744 | err_free_inst: | 1118 | err_free_inst: |
745 | kfree(pinst); | 1119 | kfree(pinst); |
746 | put_online_cpus(); | 1120 | put_online_cpus(); |
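Putting the pieces together, a full setup and teardown of an instance might look like this sketch (workqueue name hypothetical):

    struct workqueue_struct *wq;
    struct padata_instance *pinst;
    int err;

    wq = create_workqueue("my_padata_wq");
    if (!wq)
            return -ENOMEM;

    pinst = padata_alloc_possible(wq);
    if (!pinst) {
            destroy_workqueue(wq);
            return -ENOMEM;
    }

    err = padata_start(pinst);
    if (err)
            goto out_free;

    /* ... submit work with padata_do_parallel() ... */

    padata_stop(pinst);
    out_free:
    padata_free(pinst);
    destroy_workqueue(wq);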
@@ -756,19 +1130,6 @@ EXPORT_SYMBOL(padata_alloc); | |||
756 | */ | 1130 | */ |
757 | void padata_free(struct padata_instance *pinst) | 1131 | void padata_free(struct padata_instance *pinst) |
758 | { | 1132 | { |
759 | padata_stop(pinst); | 1133 | kobject_put(&pinst->kobj); |
760 | |||
761 | synchronize_rcu(); | ||
762 | |||
763 | #ifdef CONFIG_HOTPLUG_CPU | ||
764 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | ||
765 | #endif | ||
766 | get_online_cpus(); | ||
767 | padata_flush_queues(pinst->pd); | ||
768 | put_online_cpus(); | ||
769 | |||
770 | padata_free_pd(pinst->pd); | ||
771 | free_cpumask_var(pinst->cpumask); | ||
772 | kfree(pinst); | ||
773 | } | 1134 | } |
774 | EXPORT_SYMBOL(padata_free); | 1135 | EXPORT_SYMBOL(padata_free); |