Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig                          |  13
-rw-r--r--  drivers/char/hw_random/Makefile                         |   1
-rw-r--r--  drivers/char/hw_random/amd-rng.c                        |   4
-rw-r--r--  drivers/char/hw_random/geode-rng.c                      |   4
-rw-r--r--  drivers/char/hw_random/intel-rng.c                      |  13
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c                     |   2
-rw-r--r--  drivers/char/hw_random/pseries-rng.c                    |   2
-rw-r--r--  drivers/char/hw_random/via-rng.c                        |   8
-rw-r--r--  drivers/char/hw_random/xgene-rng.c                      | 423
-rw-r--r--  drivers/crypto/caam/caamhash.c                          |  28
-rw-r--r--  drivers/crypto/caam/ctrl.c                              | 138
-rw-r--r--  drivers/crypto/caam/intern.h                            |   9
-rw-r--r--  drivers/crypto/caam/regs.h                              |  51
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h                        |  54
-rw-r--r--  drivers/crypto/mv_cesa.h                                |   1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c             |   2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_internal.h  |   2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c                |  66
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c               |  14
19 files changed, 601 insertions, 234 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 836b061ced35..91a04ae8003c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -333,6 +333,19 @@ config HW_RANDOM_MSM
 
           If unsure, say Y.
 
+config HW_RANDOM_XGENE
+        tristate "APM X-Gene True Random Number Generator (TRNG) support"
+        depends on HW_RANDOM && ARCH_XGENE
+        default HW_RANDOM
+        ---help---
+          This driver provides kernel-side support for the Random Number
+          Generator hardware found on APM X-Gene SoC.
+
+          To compile this driver as a module, choose M here: the
+          module will be called xgene_rng.
+
+          If unsure, say Y.
+
 endif # HW_RANDOM
 
 config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 199ed283e149..0b4cd57f4e24 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index c6af038682f1..48f6a83cdd61 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -142,10 +142,10 @@ found:
         amd_rng.priv = (unsigned long)pmbase;
         amd_pdev = pdev;
 
-        printk(KERN_INFO "AMD768 RNG detected\n");
+        pr_info("AMD768 RNG detected\n");
         err = hwrng_register(&amd_rng);
         if (err) {
-                printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+                pr_err(PFX "RNG registering failed (%d)\n",
                        err);
                 release_region(pmbase + 0xF0, 8);
                 goto out;
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 4c4d4e140f98..0d0579fe465e 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -109,10 +109,10 @@ found:
                 goto out;
         geode_rng.priv = (unsigned long)mem;
 
-        printk(KERN_INFO "AMD Geode RNG detected\n");
+        pr_info("AMD Geode RNG detected\n");
         err = hwrng_register(&geode_rng);
         if (err) {
-                printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+                pr_err(PFX "RNG registering failed (%d)\n",
                        err);
                 goto err_unmap;
         }
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 86fe45c19968..290c880266bf 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -199,7 +199,7 @@ static int intel_rng_init(struct hwrng *rng)
         if ((hw_status & INTEL_RNG_ENABLED) == 0)
                 hw_status = hwstatus_set(mem, hw_status | INTEL_RNG_ENABLED);
         if ((hw_status & INTEL_RNG_ENABLED) == 0) {
-                printk(KERN_ERR PFX "cannot enable RNG, aborting\n");
+                pr_err(PFX "cannot enable RNG, aborting\n");
                 goto out;
         }
         err = 0;
@@ -216,7 +216,7 @@ static void intel_rng_cleanup(struct hwrng *rng)
         if (hw_status & INTEL_RNG_ENABLED)
                 hwstatus_set(mem, hw_status & ~INTEL_RNG_ENABLED);
         else
-                printk(KERN_WARNING PFX "unusual: RNG already disabled\n");
+                pr_warn(PFX "unusual: RNG already disabled\n");
 }
 
 
@@ -274,7 +274,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
         if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
             (dvc != INTEL_FWH_DEVICE_CODE_8M &&
              dvc != INTEL_FWH_DEVICE_CODE_4M)) {
-                printk(KERN_NOTICE PFX "FWH not detected\n");
+                pr_notice(PFX "FWH not detected\n");
                 return -ENODEV;
         }
 
@@ -306,7 +306,6 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
             (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
             == BIOS_CNTL_LOCK_ENABLE_MASK) {
                 static __initdata /*const*/ char warning[] =
-                        KERN_WARNING
 PFX "Firmware space is locked read-only. If you can't or\n"
 PFX "don't want to disable this in firmware setup, and if\n"
 PFX "you are certain that your system has a functional\n"
@@ -314,7 +313,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
 
                 if (no_fwh_detect)
                         return -ENODEV;
-                printk(warning);
+                pr_warn("%s", warning);
                 return -EBUSY;
         }
 
@@ -392,10 +391,10 @@ fwh_done:
                 goto out;
         }
 
-        printk(KERN_INFO "Intel 82802 RNG detected\n");
+        pr_info("Intel 82802 RNG detected\n");
         err = hwrng_register(&intel_rng);
         if (err) {
-                printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+                pr_err(PFX "RNG registering failed (%d)\n",
                        err);
                 iounmap(mem);
         }
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index c66279bb6ef3..c0347d1dded0 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -113,7 +113,7 @@ static int rng_probe(struct platform_device *ofdev)
 
         pasemi_rng.priv = (unsigned long)rng_regs;
 
-        printk(KERN_INFO "Registering PA Semi RNG\n");
+        pr_info("Registering PA Semi RNG\n");
 
         err = hwrng_register(&pasemi_rng);
 
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index ab7ffdec0ec3..6226aa08c36a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -86,7 +86,7 @@ static struct vio_driver pseries_rng_driver = {
 
 static int __init rng_init(void)
 {
-        printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
+        pr_info("Registering IBM pSeries RNG driver\n");
         return vio_register_driver(&pseries_rng_driver);
 }
 
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index de5a6dcfb3e2..a3bebef255ad 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -141,7 +141,7 @@ static int via_rng_init(struct hwrng *rng)
          * register */
         if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
                 if (!cpu_has_xstore_enabled) {
-                        printk(KERN_ERR PFX "can't enable hardware RNG "
+                        pr_err(PFX "can't enable hardware RNG "
                                 "if XSTORE is not enabled\n");
                         return -ENODEV;
                 }
@@ -180,7 +180,7 @@ static int via_rng_init(struct hwrng *rng)
          * unneeded */
         rdmsr(MSR_VIA_RNG, lo, hi);
         if ((lo & VIA_RNG_ENABLE) == 0) {
-                printk(KERN_ERR PFX "cannot enable VIA C3 RNG, aborting\n");
+                pr_err(PFX "cannot enable VIA C3 RNG, aborting\n");
                 return -ENODEV;
         }
 
@@ -202,10 +202,10 @@ static int __init mod_init(void)
 
         if (!cpu_has_xstore)
                 return -ENODEV;
-        printk(KERN_INFO "VIA RNG detected\n");
+        pr_info("VIA RNG detected\n");
         err = hwrng_register(&via_rng);
         if (err) {
-                printk(KERN_ERR PFX "RNG registering failed (%d)\n",
+                pr_err(PFX "RNG registering failed (%d)\n",
                        err);
                 goto out;
         }
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
new file mode 100644
index 000000000000..23caa05380a8
--- /dev/null
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -0,0 +1,423 @@
+/*
+ * APM X-Gene SoC RNG Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Rameshwar Prasad Sahu <rsahu@apm.com>
+ *         Shamal Winchurkar <swinchurkar@apm.com>
+ *         Feng Kan <fkan@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/timer.h>
+
+#define RNG_MAX_DATUM                   4
+#define MAX_TRY                         100
+#define XGENE_RNG_RETRY_COUNT           20
+#define XGENE_RNG_RETRY_INTERVAL        10
+
+/* RNG Registers */
+#define RNG_INOUT_0                     0x00
+#define RNG_INTR_STS_ACK                0x10
+#define RNG_CONTROL                     0x14
+#define RNG_CONFIG                      0x18
+#define RNG_ALARMCNT                    0x1c
+#define RNG_FROENABLE                   0x20
+#define RNG_FRODETUNE                   0x24
+#define RNG_ALARMMASK                   0x28
+#define RNG_ALARMSTOP                   0x2c
+#define RNG_OPTIONS                     0x78
+#define RNG_EIP_REV                     0x7c
+
+#define MONOBIT_FAIL_MASK               BIT(7)
+#define POKER_FAIL_MASK                 BIT(6)
+#define LONG_RUN_FAIL_MASK              BIT(5)
+#define RUN_FAIL_MASK                   BIT(4)
+#define NOISE_FAIL_MASK                 BIT(3)
+#define STUCK_OUT_MASK                  BIT(2)
+#define SHUTDOWN_OFLO_MASK              BIT(1)
+#define READY_MASK                      BIT(0)
+
+#define MAJOR_HW_REV_RD(src)            (((src) & 0x0f000000) >> 24)
+#define MINOR_HW_REV_RD(src)            (((src) & 0x00f00000) >> 20)
+#define HW_PATCH_LEVEL_RD(src)          (((src) & 0x000f0000) >> 16)
+#define MAX_REFILL_CYCLES_SET(dst, src) \
+                ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000))
+#define MIN_REFILL_CYCLES_SET(dst, src) \
+                ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ALARM_THRESHOLD_SET(dst, src) \
+                ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff))
+#define ENABLE_RNG_SET(dst, src) \
+                ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10)))
+#define REGSPEC_TEST_MODE_SET(dst, src) \
+                ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8)))
+#define MONOBIT_FAIL_MASK_SET(dst, src) \
+                ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7)))
+#define POKER_FAIL_MASK_SET(dst, src) \
+                ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6)))
+#define LONG_RUN_FAIL_MASK_SET(dst, src) \
+                ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5)))
+#define RUN_FAIL_MASK_SET(dst, src) \
+                ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4)))
+#define NOISE_FAIL_MASK_SET(dst, src) \
+                ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3)))
+#define STUCK_OUT_MASK_SET(dst, src) \
+                ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2)))
+#define SHUTDOWN_OFLO_MASK_SET(dst, src) \
+                ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1)))
+
+struct xgene_rng_dev {
+        u32 irq;
+        void __iomem *csr_base;
+        u32 revision;
+        u32 datum_size;
+        u32 failure_cnt;        /* Failure count last minute */
+        unsigned long failure_ts;       /* First failure timestamp */
+        struct timer_list failure_timer;
+        struct device *dev;
+        struct clk *clk;
+};
+
+static void xgene_rng_expired_timer(unsigned long arg)
+{
+        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg;
+
+        /* Clear failure counter as timer expired */
+        disable_irq(ctx->irq);
+        ctx->failure_cnt = 0;
+        del_timer(&ctx->failure_timer);
+        enable_irq(ctx->irq);
+}
+
+static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
+{
+        ctx->failure_timer.data = (unsigned long) ctx;
+        ctx->failure_timer.function = xgene_rng_expired_timer;
+        ctx->failure_timer.expires = jiffies + 120 * HZ;
+        add_timer(&ctx->failure_timer);
+}
+
+/*
+ * Initialize or reinit free running oscillators (FROs)
+ */
+static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
+{
+        writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
+        writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
+        writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
+        writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
+}
+
+static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
+{
+        u32 val;
+
+        val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+        if (val & MONOBIT_FAIL_MASK)
+                /*
+                 * LFSR detected an out-of-bounds number of 1s after
+                 * checking 20,000 bits (test T1 as specified in the
+                 * AIS-31 standard)
+                 */
+                dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
+        if (val & POKER_FAIL_MASK)
+                /*
+                 * LFSR detected an out-of-bounds value in at least one
+                 * of the 16 poker_count_X counters or an out of bounds sum
+                 * of squares value after checking 20,000 bits (test T2 as
+                 * specified in the AIS-31 standard)
+                 */
+                dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
+        if (val & LONG_RUN_FAIL_MASK)
+                /*
+                 * LFSR detected a sequence of 34 identical bits
+                 * (test T4 as specified in the AIS-31 standard)
+                 */
+                dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
+        if (val & RUN_FAIL_MASK)
+                /*
+                 * LFSR detected an outof-bounds value for at least one
+                 * of the running counters after checking 20,000 bits
+                 * (test T3 as specified in the AIS-31 standard)
+                 */
+                dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
+        if (val & NOISE_FAIL_MASK)
+                /* LFSR detected a sequence of 48 identical bits */
+                dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
+        if (val & STUCK_OUT_MASK)
+                /*
+                 * Detected output data registers generated same value twice
+                 * in a row
+                 */
+                dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
+
+        if (val & SHUTDOWN_OFLO_MASK) {
+                u32 frostopped;
+
+                /* FROs shut down after a second error event. Try recover. */
+                if (++ctx->failure_cnt == 1) {
+                        /* 1st time, just recover */
+                        ctx->failure_ts = jiffies;
+                        frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+                        xgene_rng_init_fro(ctx, frostopped);
+
+                        /*
+                         * We must start a timer to clear out this error
+                         * in case the system timer wrap around
+                         */
+                        xgene_rng_start_timer(ctx);
+                } else {
+                        /* 2nd time failure in lesser than 1 minute? */
+                        if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
+                                dev_err(ctx->dev,
+                                        "FRO shutdown failure error 0x%08X\n",
+                                        val);
+                        } else {
+                                /* 2nd time failure after 1 minutes, recover */
+                                ctx->failure_ts = jiffies;
+                                ctx->failure_cnt = 1;
+                                /*
+                                 * We must start a timer to clear out this
+                                 * error in case the system timer wrap
+                                 * around
+                                 */
+                                xgene_rng_start_timer(ctx);
+                        }
+                        frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
+                        xgene_rng_init_fro(ctx, frostopped);
+                }
+        }
+        /* Clear them all */
+        writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
+}
+
+static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
+{
+        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
+
+        /* RNG Alarm Counter overflow */
+        xgene_rng_chk_overflow(ctx);
+
+        return IRQ_HANDLED;
+}
+
+static int xgene_rng_data_present(struct hwrng *rng, int wait)
+{
+        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+        u32 i, val = 0;
+
+        for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) {
+                val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
+                if ((val & READY_MASK) || !wait)
+                        break;
+                udelay(XGENE_RNG_RETRY_INTERVAL);
+        }
+
+        return (val & READY_MASK);
+}
+
+static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
+{
+        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+        int i;
+
+        for (i = 0; i < ctx->datum_size; i++)
+                data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
+
+        /* Clear ready bit to start next transaction */
+        writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+        return ctx->datum_size << 2;
+}
+
+static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
+{
+        u32 val;
+
+        writel(0x00000000, ctx->csr_base + RNG_CONTROL);
+
+        val = MAX_REFILL_CYCLES_SET(0, 10);
+        val = MIN_REFILL_CYCLES_SET(val, 10);
+        writel(val, ctx->csr_base + RNG_CONFIG);
+
+        val = ALARM_THRESHOLD_SET(0, 0xFF);
+        writel(val, ctx->csr_base + RNG_ALARMCNT);
+
+        xgene_rng_init_fro(ctx, 0);
+
+        writel(MONOBIT_FAIL_MASK |
+                POKER_FAIL_MASK |
+                LONG_RUN_FAIL_MASK |
+                RUN_FAIL_MASK |
+                NOISE_FAIL_MASK |
+                STUCK_OUT_MASK |
+                SHUTDOWN_OFLO_MASK |
+                READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
+
+        val = ENABLE_RNG_SET(0, 1);
+        val = MONOBIT_FAIL_MASK_SET(val, 1);
+        val = POKER_FAIL_MASK_SET(val, 1);
+        val = LONG_RUN_FAIL_MASK_SET(val, 1);
+        val = RUN_FAIL_MASK_SET(val, 1);
+        val = NOISE_FAIL_MASK_SET(val, 1);
+        val = STUCK_OUT_MASK_SET(val, 1);
+        val = SHUTDOWN_OFLO_MASK_SET(val, 1);
+        writel(val, ctx->csr_base + RNG_CONTROL);
+}
+
+static int xgene_rng_init(struct hwrng *rng)
+{
+        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
+
+        ctx->failure_cnt = 0;
+        init_timer(&ctx->failure_timer);
+
+        ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
+
+        dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
+                MAJOR_HW_REV_RD(ctx->revision),
+                MINOR_HW_REV_RD(ctx->revision),
+                HW_PATCH_LEVEL_RD(ctx->revision));
+
+        dev_dbg(ctx->dev, "Options 0x%08X",
+                readl(ctx->csr_base + RNG_OPTIONS));
+
+        xgene_rng_init_internal(ctx);
+
+        ctx->datum_size = RNG_MAX_DATUM;
+
+        return 0;
+}
+
+static struct hwrng xgene_rng_func = {
+        .name           = "xgene-rng",
+        .init           = xgene_rng_init,
+        .data_present   = xgene_rng_data_present,
+        .data_read      = xgene_rng_data_read,
+};
+
+static int xgene_rng_probe(struct platform_device *pdev)
+{
+        struct resource *res;
+        struct xgene_rng_dev *ctx;
+        int rc = 0;
+
+        ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+        if (!ctx)
+                return -ENOMEM;
+
+        ctx->dev = &pdev->dev;
+        platform_set_drvdata(pdev, ctx);
+
+        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        ctx->csr_base = devm_ioremap_resource(&pdev->dev, res);
+        if (IS_ERR(ctx->csr_base))
+                return PTR_ERR(ctx->csr_base);
+
+        ctx->irq = platform_get_irq(pdev, 0);
+        if (ctx->irq < 0) {
+                dev_err(&pdev->dev, "No IRQ resource\n");
+                return ctx->irq;
+        }
+
+        dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
+                ctx->csr_base, ctx->irq);
+
+        rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
+                              dev_name(&pdev->dev), ctx);
+        if (rc) {
+                dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n");
+                return rc;
+        }
+
+        /* Enable IP clock */
+        ctx->clk = devm_clk_get(&pdev->dev, NULL);
+        if (IS_ERR(ctx->clk)) {
+                dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n");
+        } else {
+                rc = clk_prepare_enable(ctx->clk);
+                if (rc) {
+                        dev_warn(&pdev->dev,
+                                 "clock prepare enable failed for RNG");
+                        return rc;
+                }
+        }
+
+        xgene_rng_func.priv = (unsigned long) ctx;
+
+        rc = hwrng_register(&xgene_rng_func);
+        if (rc) {
+                dev_err(&pdev->dev, "RNG registering failed error %d\n", rc);
+                if (!IS_ERR(ctx->clk))
+                        clk_disable_unprepare(ctx->clk);
+                return rc;
+        }
+
+        rc = device_init_wakeup(&pdev->dev, 1);
+        if (rc) {
+                dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n",
+                        rc);
+                if (!IS_ERR(ctx->clk))
+                        clk_disable_unprepare(ctx->clk);
+                hwrng_unregister(&xgene_rng_func);
+                return rc;
+        }
+
+        return 0;
+}
+
+static int xgene_rng_remove(struct platform_device *pdev)
+{
+        struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
+        int rc;
+
+        rc = device_init_wakeup(&pdev->dev, 0);
+        if (rc)
+                dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
+        if (!IS_ERR(ctx->clk))
+                clk_disable_unprepare(ctx->clk);
+        hwrng_unregister(&xgene_rng_func);
+
+        return rc;
+}
+
+static const struct of_device_id xgene_rng_of_match[] = {
+        { .compatible = "apm,xgene-rng" },
+        { }
+};
+
+MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
+
+static struct platform_driver xgene_rng_driver = {
+        .probe = xgene_rng_probe,
+        .remove = xgene_rng_remove,
+        .driver = {
+                .name           = "xgene-rng",
+                .of_match_table = xgene_rng_of_match,
+        },
+};
+
+module_platform_driver(xgene_rng_driver);
+MODULE_DESCRIPTION("APM X-Gene RNG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index b464d03ebf40..f347ab7eea95 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -836,8 +836,9 @@ static int ahash_update_ctx(struct ahash_request *req)
                                    edesc->sec4_sg + sec4_sg_src_index,
                                    chained);
                 if (*next_buflen) {
-                        sg_copy_part(next_buf, req->src, to_hash -
-                                     *buflen, req->nbytes);
+                        scatterwalk_map_and_copy(next_buf, req->src,
+                                                 to_hash - *buflen,
+                                                 *next_buflen, 0);
                         state->current_buf = !state->current_buf;
                 }
         } else {
@@ -878,7 +879,8 @@ static int ahash_update_ctx(struct ahash_request *req)
                         kfree(edesc);
                 }
         } else if (*next_buflen) {
-                sg_copy(buf + *buflen, req->src, req->nbytes);
+                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+                                         req->nbytes, 0);
                 *buflen = *next_buflen;
                 *next_buflen = last_buflen;
         }
@@ -1262,8 +1264,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                 src_map_to_sec4_sg(jrdev, req->src, src_nents,
                                    edesc->sec4_sg + 1, chained);
                 if (*next_buflen) {
-                        sg_copy_part(next_buf, req->src, to_hash - *buflen,
-                                     req->nbytes);
+                        scatterwalk_map_and_copy(next_buf, req->src,
+                                                 to_hash - *buflen,
+                                                 *next_buflen, 0);
                         state->current_buf = !state->current_buf;
                 }
 
@@ -1304,7 +1307,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                         kfree(edesc);
                 }
         } else if (*next_buflen) {
-                sg_copy(buf + *buflen, req->src, req->nbytes);
+                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+                                         req->nbytes, 0);
                 *buflen = *next_buflen;
                 *next_buflen = 0;
         }
@@ -1413,9 +1417,9 @@ static int ahash_update_first(struct ahash_request *req)
         struct device *jrdev = ctx->jrdev;
         gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-        u8 *next_buf = state->buf_0 + state->current_buf *
-                       CAAM_MAX_HASH_BLOCK_SIZE;
-        int *next_buflen = &state->buflen_0 + state->current_buf;
+        u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
+        int *next_buflen = state->current_buf ?
+                &state->buflen_1 : &state->buflen_0;
         int to_hash;
         u32 *sh_desc = ctx->sh_desc_update_first, *desc;
         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
@@ -1476,7 +1480,8 @@ static int ahash_update_first(struct ahash_request *req)
         }
 
         if (*next_buflen)
-                sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
+                scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+                                         *next_buflen, 0);
 
         sh_len = desc_len(sh_desc);
         desc = edesc->hw_desc;
@@ -1511,7 +1516,8 @@ static int ahash_update_first(struct ahash_request *req)
                 state->update = ahash_update_no_ctx;
                 state->finup = ahash_finup_no_ctx;
                 state->final = ahash_final_no_ctx;
-                sg_copy(next_buf, req->src, req->nbytes);
+                scatterwalk_map_and_copy(next_buf, req->src, 0,
+                                         req->nbytes, 0);
         }
 #ifdef DEBUG
         print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3cade79ea41e..31000c8c4a90 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,5 +1,4 @@
-/*
- * CAAM control-plane driver backend
+/* * CAAM control-plane driver backend
  * Controller-level driver, kernel property detection, initialization
  *
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
@@ -81,38 +80,37 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                                  u32 *status)
 {
         struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-        struct caam_full __iomem *topregs;
+        struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+        struct caam_deco __iomem *deco = ctrlpriv->deco;
         unsigned int timeout = 100000;
         u32 deco_dbg_reg, flags;
         int i;
 
-        /* Set the bit to request direct access to DECO0 */
-        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
 
         if (ctrlpriv->virt_en == 1) {
-                setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+                setbits32(&ctrl->deco_rsr, DECORSR_JR0);
 
-                while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+                while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
                        --timeout)
                         cpu_relax();
 
                 timeout = 100000;
         }
 
-        setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+        setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
 
-        while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+        while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
                --timeout)
                 cpu_relax();
 
         if (!timeout) {
                 dev_err(ctrldev, "failed to acquire DECO 0\n");
-                clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+                clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
                 return -ENODEV;
         }
 
         for (i = 0; i < desc_len(desc); i++)
-                wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
+                wr_reg32(&deco->descbuf[i], *(desc + i));
 
         flags = DECO_JQCR_WHL;
         /*
@@ -123,11 +121,11 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                 flags |= DECO_JQCR_FOUR;
 
         /* Instruct the DECO to execute it */
-        wr_reg32(&topregs->deco.jr_ctl_hi, flags);
+        wr_reg32(&deco->jr_ctl_hi, flags);
 
         timeout = 10000000;
         do {
-                deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+                deco_dbg_reg = rd_reg32(&deco->desc_dbg);
                 /*
                  * If an error occured in the descriptor, then
                  * the DECO status field will be set to 0x0D
@@ -138,14 +136,14 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                         cpu_relax();
         } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
 
-        *status = rd_reg32(&topregs->deco.op_status_hi) &
+        *status = rd_reg32(&deco->op_status_hi) &
                   DECO_OP_STATUS_HI_ERR_MASK;
 
         if (ctrlpriv->virt_en == 1)
-                clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+                clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
 
         /* Mark the DECO as free */
-        clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+        clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
 
         if (!timeout)
                 return -EAGAIN;
@@ -176,13 +174,13 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                             int gen_sk)
 {
         struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-        struct caam_full __iomem *topregs;
+        struct caam_ctrl __iomem *ctrl;
         struct rng4tst __iomem *r4tst;
         u32 *desc, status, rdsta_val;
         int ret = 0, sh_idx;
 
-        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-        r4tst = &topregs->ctrl.r4tst[0];
+        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+        r4tst = &ctrl->r4tst[0];
 
         desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
         if (!desc)
@@ -212,12 +210,11 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                  * CAAM eras), then try again.
                  */
                 rdsta_val =
-                        rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+                        rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
                 if (status || !(rdsta_val & (1 << sh_idx)))
                         ret = -EAGAIN;
                 if (ret)
                         break;
-
                 dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
                 /* Clear the contents before recreating the descriptor */
                 memset(desc, 0x00, CAAM_CMD_SZ * 7);
@@ -285,12 +282,12 @@ static int caam_remove(struct platform_device *pdev)
 {
         struct device *ctrldev;
         struct caam_drv_private *ctrlpriv;
-        struct caam_full __iomem *topregs;
+        struct caam_ctrl __iomem *ctrl;
         int ring, ret = 0;
 
         ctrldev = &pdev->dev;
         ctrlpriv = dev_get_drvdata(ctrldev);
-        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 
         /* Remove platform devices for JobRs */
         for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
@@ -308,7 +305,7 @@ static int caam_remove(struct platform_device *pdev)
 #endif
 
         /* Unmap controller region */
-        iounmap(&topregs->ctrl);
+        iounmap(&ctrl);
 
         return ret;
 }
@@ -323,12 +320,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 {
         struct device *ctrldev = &pdev->dev;
         struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-        struct caam_full __iomem *topregs;
+        struct caam_ctrl __iomem *ctrl;
         struct rng4tst __iomem *r4tst;
         u32 val;
 
-        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-        r4tst = &topregs->ctrl.r4tst[0];
+        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+        r4tst = &ctrl->r4tst[0];
 
         /* put RNG4 into program mode */
         setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
@@ -355,10 +352,19 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
         wr_reg32(&r4tst->rtsdctl, val);
         /* min. freq. count, equal to 1/4 of the entropy sample length */
         wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
-        /* max. freq. count, equal to 8 times the entropy sample length */
-        wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
+        /* disable maximum frequency count */
+        wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+        /* read the control register */
+        val = rd_reg32(&r4tst->rtmctl);
+        /*
+         * select raw sampling in both entropy shifter
+         * and statistical checker
+         */
+        setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
         /* put RNG4 into run mode */
-        clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+        clrbits32(&val, RTMCTL_PRGM);
+        /* write back the control register */
+        wr_reg32(&r4tst->rtmctl, val);
 }
 
 /**
@@ -387,13 +393,14 @@ static int caam_probe(struct platform_device *pdev)
         struct device *dev;
         struct device_node *nprop, *np;
         struct caam_ctrl __iomem *ctrl;
-        struct caam_full __iomem *topregs;
         struct caam_drv_private *ctrlpriv;
 #ifdef CONFIG_DEBUG_FS
         struct caam_perfmon *perfmon;
 #endif
         u32 scfgr, comp_params;
         u32 cha_vid_ls;
+        int pg_size;
+        int BLOCK_OFFSET = 0;
 
         ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
                                 GFP_KERNEL);
@@ -412,10 +419,27 @@ static int caam_probe(struct platform_device *pdev)
                 dev_err(dev, "caam: of_iomap() failed\n");
                 return -ENOMEM;
         }
-        ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+        /* Finding the page size for using the CTPR_MS register */
+        comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+        pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
 
-        /* topregs used to derive pointers to CAAM sub-blocks only */
-        topregs = (struct caam_full __iomem *)ctrl;
+        /* Allocating the BLOCK_OFFSET based on the supported page size on
+         * the platform
+         */
+        if (pg_size == 0)
+                BLOCK_OFFSET = PG_SIZE_4K;
+        else
+                BLOCK_OFFSET = PG_SIZE_64K;
+
+        ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+        ctrlpriv->assure = (struct caam_assurance __force *)
+                           ((uint8_t *)ctrl +
+                            BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+                           );
+        ctrlpriv->deco = (struct caam_deco __force *)
+                         ((uint8_t *)ctrl +
+                          BLOCK_OFFSET * DECO_BLOCK_NUMBER
+                         );
 
         /* Get the IRQ of the controller (for security violations only) */
         ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
@@ -424,15 +448,14 @@ static int caam_probe(struct platform_device *pdev)
          * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
          * long pointers in master configuration register
          */
-        setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+        setbits32(&ctrl->mcr, MCFGR_WDENABLE |
                   (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
         /*
          * Read the Compile Time paramters and SCFGR to determine
          * if Virtualization is enabled for this platform
          */
-        comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
-        scfgr = rd_reg32(&topregs->ctrl.scfgr);
+        scfgr = rd_reg32(&ctrl->scfgr);
 
         ctrlpriv->virt_en = 0;
         if (comp_params & CTPR_MS_VIRT_EN_INCL) {
@@ -450,7 +473,7 @@ static int caam_probe(struct platform_device *pdev)
         }
 
         if (ctrlpriv->virt_en == 1)
-                setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+                setbits32(&ctrl->jrstart, JRSTART_JR0_START |
                           JRSTART_JR1_START | JRSTART_JR2_START |
                           JRSTART_JR3_START);
 
@@ -477,7 +500,7 @@ static int caam_probe(struct platform_device *pdev)
                         sizeof(struct platform_device *) * rspec,
                         GFP_KERNEL);
         if (ctrlpriv->jrpdev == NULL) {
-                iounmap(&topregs->ctrl);
+                iounmap(&ctrl);
                 return -ENOMEM;
         }
 
@@ -493,18 +516,26 @@ static int caam_probe(struct platform_device *pdev)
                                 ring);
                         continue;
                 }
+                ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+                                     ((uint8_t *)ctrl +
+                                      (ring + JR_BLOCK_NUMBER) *
+                                      BLOCK_OFFSET
+                                     );
                 ctrlpriv->total_jobrs++;
                 ring++;
         }
 
         /* Check to see if QI present. If so, enable */
         ctrlpriv->qi_present =
-                        !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+                        !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
                            CTPR_MS_QI_MASK);
         if (ctrlpriv->qi_present) {
-                ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+                ctrlpriv->qi = (struct caam_queue_if __force *)
+                               ((uint8_t *)ctrl +
+                                BLOCK_OFFSET * QI_BLOCK_NUMBER
+                               );
                 /* This is all that's required to physically enable QI */
-                wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+                wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
         }
 
         /* If no QI and no rings specified, quit and go home */
@@ -514,7 +545,7 @@ static int caam_probe(struct platform_device *pdev)
                 return -ENOMEM;
         }
 
-        cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
+        cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
 
         /*
          * If SEC has RNG version >= 4 and RNG state handle has not been
@@ -522,7 +553,7 @@ static int caam_probe(struct platform_device *pdev)
          */
         if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
                 ctrlpriv->rng4_sh_init =
-                        rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+                        rd_reg32(&ctrl->r4tst[0].rdsta);
                 /*
                  * If the secure keys (TDKEK, JDKEK, TDSK), were already
                  * generated, signal this to the function that is instantiating
@@ -533,7 +564,7 @@ static int caam_probe(struct platform_device *pdev)
                 ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
                 do {
                         int inst_handles =
-                                rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+                                rd_reg32(&ctrl->r4tst[0].rdsta) &
                                         RDSTA_IFMASK;
                         /*
                          * If either SH were instantiated by somebody else
@@ -544,6 +575,9 @@ static int caam_probe(struct platform_device *pdev)
                          * the TRNG parameters.
                          */
                         if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+                                dev_info(dev,
+                                         "Entropy delay = %u\n",
+                                         ent_delay);
                                 kick_trng(pdev, ent_delay);
                                 ent_delay += 400;
                         }
@@ -556,6 +590,12 @@ static int caam_probe(struct platform_device *pdev)
                          */
                         ret = instantiate_rng(dev, inst_handles,
                                               gen_sk);
+                        if (ret == -EAGAIN)
+                                /*
+                                 * if here, the loop will rerun,
+                                 * so don't hog the CPU
+                                 */
+                                cpu_relax();
                 } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
                 if (ret) {
                         dev_err(dev, "failed to instantiate RNG");
@@ -569,13 +609,13 @@ static int caam_probe(struct platform_device *pdev)
                 ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
 
                 /* Enable RDB bit so that RNG works faster */
-                setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
+                setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
         }
 
         /* NOTE: RTIC detection ought to go here, around Si time */
 
-        caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
-                  (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
+        caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+                  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
 
         /* Report "alive" for developer to see */
         dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 97363db4e56e..89b94cc9e7a2 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -70,10 +70,11 @@ struct caam_drv_private {
         struct platform_device *pdev;
 
         /* Physical-presence section */
-        struct caam_ctrl *ctrl; /* controller region */
-        struct caam_deco **deco; /* DECO/CCB views */
-        struct caam_assurance *ac;
-        struct caam_queue_if *qi; /* QI control region */
+        struct caam_ctrl __iomem *ctrl; /* controller region */
+        struct caam_deco __iomem *deco; /* DECO/CCB views */
+        struct caam_assurance __iomem *assure;
+        struct caam_queue_if __iomem *qi; /* QI control region */
+        struct caam_job_ring __iomem *jr[4];    /* JobR's register space */
 
         /*
          * Detected geometry block. Filled in from device tree if powerpc,
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index f48e344ffc39..378ddc17f60e 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -194,6 +194,8 @@ struct caam_perfmon {
 #define CTPR_MS_QI_MASK         (0x1ull << CTPR_MS_QI_SHIFT)
 #define CTPR_MS_VIRT_EN_INCL    0x00000001
 #define CTPR_MS_VIRT_EN_POR     0x00000002
+#define CTPR_MS_PG_SZ_MASK      0x10
+#define CTPR_MS_PG_SZ_SHIFT     4
         u32 comp_parms_ms;      /* CTPR - Compile Parameters Register */
         u32 comp_parms_ls;      /* CTPR - Compile Parameters Register */
         u64 rsvd1[2];
@@ -269,6 +271,16 @@ struct rngtst {
 /* RNG4 TRNG test registers */
 struct rng4tst {
 #define RTMCTL_PRGM 0x00010000  /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC      0 /* use von Neumann data in
+                                                     both entropy shifter and
+                                                     statistical checker */
+#define RTMCTL_SAMP_MODE_RAW_ES_SC              1 /* use raw data in both
+                                                     entropy shifter and
+                                                     statistical checker */
+#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC  2 /* use von Neumann data in
+                                                     entropy shifter, raw data
+                                                     in statistical checker */
+#define RTMCTL_SAMP_MODE_INVALID                3 /* invalid combination */
         u32 rtmctl;             /* misc. control register */
         u32 rtscmisc;           /* statistical check misc. register */
         u32 rtpkrrng;           /* poker range register */
@@ -278,7 +290,7 @@ struct rng4tst {
         };
 #define RTSDCTL_ENT_DLY_SHIFT 16
 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
-#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MIN 3200
 #define RTSDCTL_ENT_DLY_MAX 12800
         u32 rtsdctl;            /* seed control register */
         union {
@@ -286,6 +298,7 @@ struct rng4tst {
                 u32 rttotsam;   /* PRGM=0: total samples register */
         };
         u32 rtfrqmin;           /* frequency count min. limit register */
+#define RTFRQMAX_DISABLE        (1 << 20)
         union {
                 u32 rtfrqmax;   /* PRGM=1: freq. count max. limit register */
                 u32 rtfrqcnt;   /* PRGM=0: freq. count register */
@@ -758,34 +771,10 @@ struct caam_deco {
 #define DECO_JQCR_WHL           0x20000000
 #define DECO_JQCR_FOUR          0x10000000
 
-/*
- * Current top-level view of memory map is:
- *
- * 0x0000 - 0x0fff - CAAM Top-Level Control
- * 0x1000 - 0x1fff - Job Ring 0
- * 0x2000 - 0x2fff - Job Ring 1
- * 0x3000 - 0x3fff - Job Ring 2
- * 0x4000 - 0x4fff - Job Ring 3
- * 0x5000 - 0x5fff - (unused)
- * 0x6000 - 0x6fff - Assurance Controller
- * 0x7000 - 0x7fff - Queue Interface
- * 0x8000 - 0x8fff - DECO-CCB 0
- * 0x9000 - 0x9fff - DECO-CCB 1
- * 0xa000 - 0xafff - DECO-CCB 2
- * 0xb000 - 0xbfff - DECO-CCB 3
- * 0xc000 - 0xcfff - DECO-CCB 4
- *
- * caam_full describes the full register view of CAAM if useful,
- * although many configurations may choose to implement parts of
- * the register map separately, in differing privilege regions
- */
-struct caam_full {
-        struct caam_ctrl __iomem ctrl;
-        struct caam_job_ring jr[4];
-        u64 rsvd[512];
-        struct caam_assurance assure;
-        struct caam_queue_if qi;
-        struct caam_deco deco;
-};
-
+#define JR_BLOCK_NUMBER         1
+#define ASSURE_BLOCK_NUMBER     6
+#define QI_BLOCK_NUMBER         7
+#define DECO_BLOCK_NUMBER       8
+#define PG_SIZE_4K              0x1000
+#define PG_SIZE_64K             0x10000
 #endif /* REGS_H */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b12ff85f4241..ce28a563effc 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -116,57 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
         }
         return nents;
 }
-
-/* Map SG page in kernel virtual address space and copy */
-static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
-                               int len, int offset)
-{
-        u8 *mapped_addr;
-
-        /*
-         * Page here can be user-space pinned using get_user_pages
-         * Same must be kmapped before use and kunmapped subsequently
-         */
-        mapped_addr = kmap_atomic(sg_page(sg));
-        memcpy(dest, mapped_addr + offset, len);
-        kunmap_atomic(mapped_addr);
-}
-
-/* Copy from len bytes of sg to dest, starting from beginning */
-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
-{
-        struct scatterlist *current_sg = sg;
-        int cpy_index = 0, next_cpy_index = current_sg->length;
-
-        while (next_cpy_index < len) {
-                sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
-                            current_sg->offset);
-                current_sg = scatterwalk_sg_next(current_sg);
-                cpy_index = next_cpy_index;
-                next_cpy_index += current_sg->length;
-        }
-        if (cpy_index < len)
-                sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
-                            current_sg->offset);
-}
-
-/* Copy sg data, from to_skip to end, to dest */
-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
-                                int to_skip, unsigned int end)
-{
-        struct scatterlist *current_sg = sg;
-        int sg_index, cpy_index, offset;
-
-        sg_index = current_sg->length;
-        while (sg_index <= to_skip) {
-                current_sg = scatterwalk_sg_next(current_sg);
-                sg_index += current_sg->length;
-        }
-        cpy_index = sg_index - to_skip;
-        offset = current_sg->offset + current_sg->length - cpy_index;
-        sg_map_copy(dest, current_sg, cpy_index, offset);
-        if (end - sg_index) {
-                current_sg = scatterwalk_sg_next(current_sg);
-                sg_copy(dest + cpy_index, current_sg, end - sg_index);
-        }
-}
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index 08fcb1116d90..9249d3ed184b 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,4 +1,5 @@
 #ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
 
 #define DIGEST_INITIAL_VAL_A    0xdd00
 #define DIGEST_INITIAL_VAL_B    0xdd04
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 6a92284a86b2..244d73378f0e 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -111,7 +111,7 @@ static int adf_chr_drv_create(void)
         drv_device = device_create(adt_ctl_drv.drv_class, NULL,
                                    MKDEV(adt_ctl_drv.major, 0),
                                    NULL, DEVICE_NAME);
-        if (!drv_device) {
+        if (IS_ERR(drv_device)) {
                 pr_err("QAT: failed to create device\n");
                 goto err_cdev_del;
         }
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index f854bac276b0..c40546079981 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -75,7 +75,7 @@ struct adf_etr_ring_data {
 
 struct adf_etr_bank_data {
         struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
-        struct tasklet_struct resp_hanlder;
+        struct tasklet_struct resp_handler;
         void __iomem *csr_addr;
         struct adf_accel_dev *accel_dev;
         uint32_t irq_coalesc_timer;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59df48872955..3e26fa2b293f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -105,7 +105,7 @@ struct qat_alg_cd {
 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
 
 struct qat_auth_state {
-        uint8_t data[MAX_AUTH_STATE_SIZE];
+        uint8_t data[MAX_AUTH_STATE_SIZE + 64];
 } __aligned(64);
 
 struct qat_alg_session_ctx {
@@ -113,10 +113,6 @@ struct qat_alg_session_ctx {
         dma_addr_t enc_cd_paddr;
         struct qat_alg_cd *dec_cd;
         dma_addr_t dec_cd_paddr;
-        struct qat_auth_state *auth_hw_state_enc;
-        dma_addr_t auth_state_enc_paddr;
-        struct qat_auth_state *auth_hw_state_dec;
-        dma_addr_t auth_state_dec_paddr;
         struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
         struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
         struct qat_crypto_instance *inst;
@@ -150,8 +146,9 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                   struct qat_alg_session_ctx *ctx,
                                   const uint8_t *auth_key,
-                                  unsigned int auth_keylen, uint8_t *auth_state)
+                                  unsigned int auth_keylen)
 {
+        struct qat_auth_state auth_state;
         struct {
                 struct shash_desc shash;
                 char ctx[crypto_shash_descsize(ctx->hash_tfm)];
@@ -161,12 +158,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	struct sha512_state sha512;
 	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
 	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
-	uint8_t *ipad = auth_state;
+	uint8_t *ipad = auth_state.data;
 	uint8_t *opad = ipad + block_size;
 	__be32 *hash_state_out;
 	__be64 *hash512_state_out;
 	int i, offset;
 
+	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
 	desc.shash.tfm = ctx->hash_tfm;
 	desc.shash.flags = 0x0;
 
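
Review note: with this hunk qat_auth_state becomes a stack local in qat_alg_do_precomputes() rather than a DMA-coherent allocation, and the extra 64 bytes added to the array in the first hunk match the memset length here, presumably as slack on top of the __aligned(64) attribute. The ipad/opad pointers carved out of auth_state.data hold the standard HMAC inner and outer pads. A minimal standalone sketch of that pad derivation follows; it is a hypothetical userspace helper, not driver code, and assumes the key has already been shortened to at most block_size, as the driver does by hashing overlong keys:

#include <stdint.h>
#include <string.h>

/*
 * Hypothetical sketch of the HMAC pad precompute: the auth key is
 * zero-padded to the hash block size in two buffers, then XORed with
 * the HMAC constants 0x36 (inner pad) and 0x5c (outer pad).
 */
static void hmac_precompute_pads(uint8_t *ipad, uint8_t *opad,
				 const uint8_t *auth_key,
				 unsigned int auth_keylen,
				 unsigned int block_size)
{
	unsigned int i;

	memset(ipad, 0, block_size);	/* zero-pad short keys */
	memset(opad, 0, block_size);
	memcpy(ipad, auth_key, auth_keylen);
	memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}
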
@@ -298,10 +296,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
 	void *ptr = &req_tmpl->cd_ctrl;
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(struct icp_qat_fw_la_auth_req_params *)
-		((char *)&req_tmpl->serv_specif_rqpars +
-		sizeof(struct icp_qat_fw_la_cipher_req_params));
 
 	/* CD setup */
 	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
@@ -312,8 +306,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
 	hash->sha.inner_setup.auth_counter.counter =
 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
 
-	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
-				   (uint8_t *)ctx->auth_hw_state_enc))
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
 		return -EFAULT;
 
 	/* Request setup */
@@ -359,9 +352,6 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
 			((sizeof(struct icp_qat_hw_auth_setup) +
 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
-			sizeof(struct icp_qat_hw_auth_counter) +
-			round_up(hash_cd_ctrl->inner_state1_sz, 8);
 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
 	return 0;
@@ -399,8 +389,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
 	hash->sha.inner_setup.auth_counter.counter =
 		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
 
-	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
-				   (uint8_t *)ctx->auth_hw_state_dec))
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
 		return -EFAULT;
 
 	/* Request setup */
@@ -450,9 +439,6 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
 	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
 			((sizeof(struct icp_qat_hw_auth_setup) +
 			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
-			sizeof(struct icp_qat_hw_auth_counter) +
-			round_up(hash_cd_ctrl->inner_state1_sz, 8);
 	auth_param->auth_res_sz = digestsize;
 	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
 	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
@@ -512,10 +498,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	dev = &GET_DEV(ctx->inst->accel_dev);
 	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
 	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-	memset(ctx->auth_hw_state_enc, 0,
-	       sizeof(struct qat_auth_state));
-	memset(ctx->auth_hw_state_dec, 0,
-	       sizeof(struct qat_auth_state));
 	memset(&ctx->enc_fw_req_tmpl, 0,
 	       sizeof(struct icp_qat_fw_la_bulk_req));
 	memset(&ctx->dec_fw_req_tmpl, 0,
@@ -548,22 +530,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 			spin_unlock(&ctx->lock);
 			goto out_free_enc;
 		}
-		ctx->auth_hw_state_enc =
-			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
-					    &ctx->auth_state_enc_paddr,
-					    GFP_ATOMIC);
-		if (!ctx->auth_hw_state_enc) {
-			spin_unlock(&ctx->lock);
-			goto out_free_dec;
-		}
-		ctx->auth_hw_state_dec =
-			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
-					    &ctx->auth_state_dec_paddr,
-					    GFP_ATOMIC);
-		if (!ctx->auth_hw_state_dec) {
-			spin_unlock(&ctx->lock);
-			goto out_free_auth_enc;
-		}
 	}
 	spin_unlock(&ctx->lock);
 	if (qat_alg_init_sessions(ctx, key, keylen))
@@ -572,14 +538,6 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	return 0;
 
 out_free_all:
-	dma_free_coherent(dev, sizeof(struct qat_auth_state),
-			  ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
-	ctx->auth_hw_state_dec = NULL;
-out_free_auth_enc:
-	dma_free_coherent(dev, sizeof(struct qat_auth_state),
-			  ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
-	ctx->auth_hw_state_enc = NULL;
-out_free_dec:
 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 			  ctx->dec_cd, ctx->dec_cd_paddr);
 	ctx->dec_cd = NULL;
@@ -924,16 +882,6 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
 	if (ctx->dec_cd)
 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 				  ctx->dec_cd, ctx->dec_cd_paddr);
-	if (ctx->auth_hw_state_enc)
-		dma_free_coherent(dev, sizeof(struct qat_auth_state),
-				  ctx->auth_hw_state_enc,
-				  ctx->auth_state_enc_paddr);
-
-	if (ctx->auth_hw_state_dec)
-		dma_free_coherent(dev, sizeof(struct qat_auth_state),
-				  ctx->auth_hw_state_dec,
-				  ctx->auth_state_dec_paddr);
-
 	qat_crypto_put_instance(inst);
 }
 
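
Review note: the payoff of the stack conversion shows in the last three hunks: two dma_zalloc_coherent()/dma_free_coherent() pairs disappear from qat_alg_setkey() and qat_alg_exit(), and the out_free_auth_enc and out_free_dec rungs drop out of the error ladder. It also retires a latent copy/paste slip: the removed decrypt-path assignment read ctx->auth_state_enc_paddr, the encrypt-side address. What remains is the usual kernel goto-unwind pattern; a self-contained illustration with hypothetical names:

#include <stdlib.h>

/*
 * Minimal illustration of the goto-unwind idiom qat_alg_setkey() keeps:
 * each successful allocation adds one label that releases it if a later
 * step fails, so every exit path frees exactly what was acquired.
 */
static int setup_pair(void **a, void **b)
{
	*a = malloc(64);
	if (!*a)
		goto out;
	*b = malloc(64);
	if (!*b)
		goto out_free_a;
	return 0;

out_free_a:
	free(*a);
	*a = NULL;
out:
	return -1;
}
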
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d4172dedf775..67ec61e51185 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -70,9 +70,9 @@ static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 	for (i = 0; i < msix_num_entries; i++)
 		pci_dev_info->msix_entries.entries[i].entry = i;
 
-	if (pci_enable_msix(pci_dev_info->pci_dev,
-			    pci_dev_info->msix_entries.entries,
-			    msix_num_entries)) {
+	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
+				  pci_dev_info->msix_entries.entries,
+				  msix_num_entries)) {
 		pr_err("QAT: Failed to enable MSIX IRQ\n");
 		return -EFAULT;
 	}
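
Review note: pci_enable_msix() can also return a positive value, the number of vectors actually available, when an exact allocation cannot be met, so treating any nonzero return as a hard failure silently discards that retry hint. pci_enable_msix_exact() returns only 0 or a negative errno, which is exactly what this call site wants. A sketch of the pattern, as a hypothetical wrapper around the real kernel API:

#include <linux/pci.h>

/*
 * Hypothetical wrapper: request exactly nvec MSI-X vectors.  Returns 0
 * on success or a negative errno; there is no positive "fewer vectors
 * available" return to handle, unlike the older pci_enable_msix().
 */
static int enable_exact_msix(struct pci_dev *pdev,
			     struct msix_entry *entries, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	return pci_enable_msix_exact(pdev, entries, nvec);
}
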
@@ -89,7 +89,7 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
 	struct adf_etr_bank_data *bank = bank_ptr;
 
 	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
-	tasklet_hi_schedule(&bank->resp_hanlder);
+	tasklet_hi_schedule(&bank->resp_handler);
 	return IRQ_HANDLED;
 }
 
@@ -217,7 +217,7 @@ static int adf_setup_bh(struct adf_accel_dev *accel_dev)
 	int i;
 
 	for (i = 0; i < hw_data->num_banks; i++)
-		tasklet_init(&priv_data->banks[i].resp_hanlder,
+		tasklet_init(&priv_data->banks[i].resp_handler,
 			     adf_response_handler,
 			     (unsigned long)&priv_data->banks[i]);
 	return 0;
@@ -230,8 +230,8 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
 	int i;
 
 	for (i = 0; i < hw_data->num_banks; i++) {
-		tasklet_disable(&priv_data->banks[i].resp_hanlder);
-		tasklet_kill(&priv_data->banks[i].resp_hanlder);
+		tasklet_disable(&priv_data->banks[i].resp_handler);
+		tasklet_kill(&priv_data->banks[i].resp_handler);
 	}
 }
 
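
Review note: these hunks propagate the resp_hanlder to resp_handler spelling fix from adf_transport_internal.h to every user, and together they trace the full tasklet lifecycle the field follows: init once, schedule from the ISR, disable and kill on teardown. A self-contained sketch of that lifecycle, with hypothetical names around the real kernel APIs:

#include <linux/interrupt.h>

static void my_response_handler(unsigned long data)
{
	/* deferred response processing runs here, out of hard-IRQ context */
}

static struct tasklet_struct my_bh;

static void my_setup_bh(void)
{
	tasklet_init(&my_bh, my_response_handler, 0);	/* once, at init */
}

static irqreturn_t my_isr(int irq, void *dev_id)
{
	tasklet_hi_schedule(&my_bh);	/* from the ISR: high-priority bottom half */
	return IRQ_HANDLED;
}

static void my_cleanup_bh(void)
{
	tasklet_disable(&my_bh);	/* block further runs */
	tasklet_kill(&my_bh);		/* wait out any in-flight run */
}
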