aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto/mv_cesa.c
diff options
context:
space:
mode:
authorBoris BREZILLON <boris.brezillon@free-electrons.com>2015-06-18 09:46:18 -0400
committerHerbert Xu <herbert@gondor.apana.org.au>2015-06-19 10:18:02 -0400
commit51b44fc81178136bca88565dad07c067c8dc51da (patch)
treef59a15c9ae84ea0bdf05624a47bc399f0c543e03 /drivers/crypto/mv_cesa.c
parent1c075486852920241ace0f8087498c3ef6522fb2 (diff)
crypto: mv_cesa - use gen_pool to reserve the SRAM memory region
The mv_cesa driver currently expects the SRAM memory region to be passed as a platform device resource. This approach implies two drawbacks: - the DT representation is wrong - the only one that can access the SRAM is the crypto engine The last point is particularly annoying in some cases: for example on armada 370, a small region of the crypto SRAM is used to implement the cpuidle, which means you would not be able to enable both cpuidle and the CESA driver. To address that problem, we explicitly define the SRAM device in the DT and then reference the sram node from the crypto engine node. Also note that the old way of retrieving the SRAM memory region is still supported, or in other words, backward compatibility is preserved. Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/mv_cesa.c')
-rw-r--r--drivers/crypto/mv_cesa.c58
1 files changed, 43 insertions, 15 deletions
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index eb645c2cf3eb..e31d82c1eebe 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -9,6 +9,7 @@
9#include <crypto/aes.h> 9#include <crypto/aes.h>
10#include <crypto/algapi.h> 10#include <crypto/algapi.h>
11#include <linux/crypto.h> 11#include <linux/crypto.h>
12#include <linux/genalloc.h>
12#include <linux/interrupt.h> 13#include <linux/interrupt.h>
13#include <linux/io.h> 14#include <linux/io.h>
14#include <linux/kthread.h> 15#include <linux/kthread.h>
@@ -29,6 +30,8 @@
29#define MAX_HW_HASH_SIZE 0xFFFF 30#define MAX_HW_HASH_SIZE 0xFFFF
30#define MV_CESA_EXPIRE 500 /* msec */ 31#define MV_CESA_EXPIRE 500 /* msec */
31 32
33#define MV_CESA_DEFAULT_SRAM_SIZE 2048
34
32/* 35/*
33 * STM: 36 * STM:
34 * /---------------------------------------\ 37 * /---------------------------------------\
@@ -83,6 +86,8 @@ struct req_progress {
83struct crypto_priv { 86struct crypto_priv {
84 void __iomem *reg; 87 void __iomem *reg;
85 void __iomem *sram; 88 void __iomem *sram;
89 struct gen_pool *sram_pool;
90 dma_addr_t sram_dma;
86 int irq; 91 int irq;
87 struct clk *clk; 92 struct clk *clk;
88 struct task_struct *queue_th; 93 struct task_struct *queue_th;
@@ -1019,6 +1024,39 @@ static struct ahash_alg mv_hmac_sha1_alg = {
1019 } 1024 }
1020}; 1025};
1021 1026
1027static int mv_cesa_get_sram(struct platform_device *pdev,
1028 struct crypto_priv *cp)
1029{
1030 struct resource *res;
1031 u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;
1032
1033 of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
1034 &sram_size);
1035
1036 cp->sram_size = sram_size;
1037 cp->sram_pool = of_get_named_gen_pool(pdev->dev.of_node,
1038 "marvell,crypto-srams", 0);
1039 if (cp->sram_pool) {
1040 cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
1041 &cp->sram_dma);
1042 if (cp->sram)
1043 return 0;
1044
1045 return -ENOMEM;
1046 }
1047
1048 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1049 "sram");
1050 if (!res || resource_size(res) < cp->sram_size)
1051 return -EINVAL;
1052
1053 cp->sram = devm_ioremap_resource(&pdev->dev, res);
1054 if (IS_ERR(cp->sram))
1055 return PTR_ERR(cp->sram);
1056
1057 return 0;
1058}
1059
1022static int mv_probe(struct platform_device *pdev) 1060static int mv_probe(struct platform_device *pdev)
1023{ 1061{
1024 struct crypto_priv *cp; 1062 struct crypto_priv *cp;
@@ -1047,18 +1085,11 @@ static int mv_probe(struct platform_device *pdev)
1047 goto err; 1085 goto err;
1048 } 1086 }
1049 1087
1050 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); 1088 ret = mv_cesa_get_sram(pdev, cp);
1051 if (!res) { 1089 if (ret)
1052 ret = -ENXIO;
1053 goto err; 1090 goto err;
1054 } 1091
1055 cp->sram_size = resource_size(res);
1056 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; 1092 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
1057 cp->sram = ioremap(res->start, cp->sram_size);
1058 if (!cp->sram) {
1059 ret = -ENOMEM;
1060 goto err;
1061 }
1062 1093
1063 if (pdev->dev.of_node) 1094 if (pdev->dev.of_node)
1064 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 1095 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
@@ -1066,7 +1097,7 @@ static int mv_probe(struct platform_device *pdev)
1066 irq = platform_get_irq(pdev, 0); 1097 irq = platform_get_irq(pdev, 0);
1067 if (irq < 0 || irq == NO_IRQ) { 1098 if (irq < 0 || irq == NO_IRQ) {
1068 ret = irq; 1099 ret = irq;
1069 goto err_unmap_sram; 1100 goto err;
1070 } 1101 }
1071 cp->irq = irq; 1102 cp->irq = irq;
1072 1103
@@ -1076,7 +1107,7 @@ static int mv_probe(struct platform_device *pdev)
1076 cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); 1107 cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
1077 if (IS_ERR(cp->queue_th)) { 1108 if (IS_ERR(cp->queue_th)) {
1078 ret = PTR_ERR(cp->queue_th); 1109 ret = PTR_ERR(cp->queue_th);
1079 goto err_unmap_sram; 1110 goto err;
1080 } 1111 }
1081 1112
1082 ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), 1113 ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
@@ -1134,8 +1165,6 @@ err_irq:
1134 } 1165 }
1135err_thread: 1166err_thread:
1136 kthread_stop(cp->queue_th); 1167 kthread_stop(cp->queue_th);
1137err_unmap_sram:
1138 iounmap(cp->sram);
1139err: 1168err:
1140 kfree(cp); 1169 kfree(cp);
1141 cpg = NULL; 1170 cpg = NULL;
@@ -1155,7 +1184,6 @@ static int mv_remove(struct platform_device *pdev)
1155 kthread_stop(cp->queue_th); 1184 kthread_stop(cp->queue_th);
1156 free_irq(cp->irq, cp); 1185 free_irq(cp->irq, cp);
1157 memset(cp->sram, 0, cp->sram_size); 1186 memset(cp->sram, 0, cp->sram_size);
1158 iounmap(cp->sram);
1159 1187
1160 if (!IS_ERR(cp->clk)) { 1188 if (!IS_ERR(cp->clk)) {
1161 clk_disable_unprepare(cp->clk); 1189 clk_disable_unprepare(cp->clk);