author    Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 17:55:21 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-11-13 17:55:21 -0500
commit    8ceafbfa91ffbdbb2afaea5c24ccb519ffb8b587 (patch)
tree      98c9ea93362536f1ddd73175b13b7847583350df /drivers/crypto
parent    42a2d923cc349583ebf6fdd52a7d35e1c2f7e6bd (diff)
parent    26ba47b18318abe7dadbe9294a611c0e932651d8 (diff)
Merge branch 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm
Pull DMA mask updates from Russell King:
 "This series cleans up the handling of DMA masks in a lot of drivers,
  fixing some bugs as we go.

  Some of the more serious errors include:

   - drivers which only set their coherent DMA mask if the attempt to
     set the streaming mask fails.

   - drivers which test for a NULL dma mask pointer, and then set the
     dma mask pointer to a location in their module .data section -
     which will cause problems if the module is reloaded.

  To counter these, I have introduced two helper functions:

   - dma_set_mask_and_coherent() takes care of setting both the
     streaming and coherent masks at the same time, with the correct
     error handling as specified by the API.

   - dma_coerce_mask_and_coherent() which resolves the problem of
     drivers forcefully setting DMA masks.  This is more a marker for
     future work to further clean these locations up - the code which
     creates the devices really should be initialising these, but to
     fix that in one go along with this change could potentially be
     very disruptive.

  The last thing this series does is prise away some of Linux's
  addition to "DMA addresses are physical addresses and RAM always
  starts at zero".  We have ARM LPAE systems where all system memory
  is above 4GB physical, hence having DMA masks interpreted by (eg)
  the block layers as describing physical addresses in the range
  0..DMAMASK fails on these platforms.  Santosh Shilimkar addresses
  this in this series; the patches were copied to the appropriate
  people multiple times but were ignored.

  Fixing this also gets rid of some ARM weirdness in the setup of the
  max*pfn variables, and brings ARM into line with every other Linux
  architecture as far as those go"

* 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm: (52 commits)
  ARM: 7805/1: mm: change max*pfn to include the physical offset of memory
  ARM: 7797/1: mmc: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7796/1: scsi: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7795/1: mm: dma-mapping: Add dma_max_pfn(dev) helper function
  ARM: 7794/1: block: Rename parameter dma_mask to max_addr for blk_queue_bounce_limit()
  ARM: DMA-API: better handing of DMA masks for coherent allocations
  ARM: 7857/1: dma: imx-sdma: setup dma mask
  DMA-API: firmware/google/gsmi.c: avoid direct access to DMA masks
  DMA-API: dcdbas: update DMA mask handing
  DMA-API: dma: edma.c: no need to explicitly initialize DMA masks
  DMA-API: usb: musb: use platform_device_register_full() to avoid directly messing with dma masks
  DMA-API: crypto: remove last references to 'static struct device *dev'
  DMA-API: crypto: fix ixp4xx crypto platform device support
  DMA-API: others: use dma_set_coherent_mask()
  DMA-API: staging: use dma_set_coherent_mask()
  DMA-API: usb: use new dma_coerce_mask_and_coherent()
  DMA-API: usb: use dma_set_coherent_mask()
  DMA-API: parport: parport_pc.c: use dma_coerce_mask_and_coherent()
  DMA-API: net: octeon: use dma_coerce_mask_and_coherent()
  DMA-API: net: nxp/lpc_eth: use dma_coerce_mask_and_coherent()
  ...
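For illustration, a minimal sketch of the two helpers in a driver's probe path; the function names, the device, and the mask widths below are hypothetical and not taken from this series:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe() sketching use of dma_set_mask_and_coherent().
 * The buggy pattern this replaces only set the coherent mask when
 * setting the streaming mask failed:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)))
 *		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 */
static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Set streaming and coherent masks together: try 64-bit DMA,
	 * fall back to 32-bit if the wider mask is not supported. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	return 0;
}

/*
 * For a device whose creator never initialised dev->dma_mask,
 * dma_coerce_mask_and_coherent() forcibly points the streaming mask
 * at the device's coherent mask storage and then sets both, replacing
 * open-coded "dev->dma_mask = &dev->coherent_dma_mask" assignments.
 */
static int example_legacy_setup(struct device *dev)
{
	return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
}

The point of dma_set_mask_and_coherent() is that success or failure applies to both masks at once, so the coherent mask can no longer be left unset on the success path.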
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 21180d6cad6e..214357e12dc0 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
 
 static int support_aes = 1;
 
-static void dev_release(struct device *dev)
-{
-	return;
-}
-
 #define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
-	.name = DRIVER_NAME,
-	.id   = 0,
-	.num_resources = 0,
-	.dev  = {
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.release = dev_release,
-	}
-};
 
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
 
 static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 {
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
 
 static int setup_crypt_desc(void)
 {
+	struct device *dev = &pdev->dev;
 	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
 	crypt_virt = dma_alloc_coherent(dev,
 			NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
 
 static void one_packet(dma_addr_t phys)
 {
+	struct device *dev = &pdev->dev;
 	struct crypt_ctl *crypt;
 	struct ixp_ctx *ctx;
 	int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
 	tasklet_schedule(&crypto_done_tasklet);
 }
 
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
 {
 	int ret = -ENODEV;
 	u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
 	return ret;
 }
 
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
 {
 	qmgr_disable_irq(RECV_QID);
 	tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct buffer_desc src_hook;
+	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
 
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	unsigned int cryptlen;
 	struct buffer_desc *buf, src_hook;
 	struct aead_ctx *req_ctx = aead_request_ctx(req);
+	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
 
@@ -1418,20 +1408,30 @@ static struct ixp_alg ixp4xx_algos[] = {
 } };
 
 #define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+	.name = DRIVER_NAME,
+	.id = 0,
+	.dma_mask = DMA_BIT_MASK(32),
+};
+
 static int __init ixp_module_init(void)
 {
 	int num = ARRAY_SIZE(ixp4xx_algos);
-	int i,err ;
+	int i, err ;
 
-	if (platform_device_register(&pseudo_dev))
-		return -ENODEV;
+	pdev = platform_device_register_full(&ixp_dev_info);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	dev = &pdev->dev;
 
 	spin_lock_init(&desc_lock);
 	spin_lock_init(&emerg_lock);
 
-	err = init_ixp_crypto();
+	err = init_ixp_crypto(&pdev->dev);
 	if (err) {
-		platform_device_unregister(&pseudo_dev);
+		platform_device_unregister(pdev);
 		return err;
 	}
 	for (i=0; i< num; i++) {
@@ -1495,8 +1495,8 @@ static void __exit ixp_module_exit(void)
 		if (ixp4xx_algos[i].registered)
 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
 	}
-	release_ixp_crypto();
-	platform_device_unregister(&pseudo_dev);
+	release_ixp_crypto(&pdev->dev);
+	platform_device_unregister(pdev);
 }
 
 module_init(ixp_module_init);
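The hunks above replace a statically allocated struct platform_device with one created by the platform core. A self-contained sketch of that registration pattern follows; the module and device names are made up for illustration, and this mirrors rather than reproduces the patch:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

/*
 * Describing the device with a platform_device_info and letting the
 * core allocate it avoids a static struct platform_device, which
 * needs a dummy release() and misbehaves with device refcounting on
 * module reload.  A non-zero .dma_mask makes the core allocate the
 * dma_mask storage together with the device, so dev->dma_mask never
 * points into module .data.
 */
static const struct platform_device_info demo_dev_info = {
	.name     = "demo-crypto",
	.id       = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	demo_pdev = platform_device_register_full(&demo_dev_info);
	if (IS_ERR(demo_pdev))
		return PTR_ERR(demo_pdev);
	return 0;
}

static void __exit demo_exit(void)
{
	platform_device_unregister(demo_pdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that platform_device_register_full() returns an ERR_PTR() on failure, which is why the patch above switches the error path from a plain -ENODEV to IS_ERR()/PTR_ERR().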