Diffstat (limited to 'drivers/crypto/talitos.c')
 drivers/crypto/talitos.c | 216 ++++++++++++++++++++++++-----------------------
 1 file changed, 100 insertions(+), 116 deletions(-)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce2..c47ffe8a73ef 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
 	void *context;
 };
 
+/* per-channel fifo management */
+struct talitos_channel {
+	/* request fifo */
+	struct talitos_request *fifo;
+
+	/* number of requests pending in channel h/w fifo */
+	atomic_t submit_count ____cacheline_aligned;
+
+	/* request submission (head) lock */
+	spinlock_t head_lock ____cacheline_aligned;
+	/* index to next free descriptor request */
+	int head;
+
+	/* request release (tail) lock */
+	spinlock_t tail_lock ____cacheline_aligned;
+	/* index to next in-progress/done descriptor request */
+	int tail;
+};
+
 struct talitos_private {
 	struct device *dev;
 	struct of_device *ofdev;
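Gathering the per-channel bookkeeping into struct talitos_channel replaces six parallel per-channel arrays. The ____cacheline_aligned annotations (from <linux/cache.h>) put submit_count, the submission (head) pair and the completion (tail) pair on separate cache lines, so the submit path and the done-processing path do not false-share. A minimal sketch of the idea, not from the patch:

	#include <linux/cache.h>
	#include <linux/spinlock.h>

	/* Each ____cacheline_aligned member starts a fresh cache line, so a
	 * CPU spinning on head_lock never ping-pongs the line that holds
	 * tail_lock, and vice versa. */
	struct demo_channel {
		spinlock_t head_lock ____cacheline_aligned;	/* submit side */
		int head;
		spinlock_t tail_lock ____cacheline_aligned;	/* completion side */
		int tail;
	};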
@@ -101,15 +120,6 @@ struct talitos_private {
 	/* SEC Compatibility info */
 	unsigned long features;
 
-	/* next channel to be assigned next incoming descriptor */
-	atomic_t last_chan;
-
-	/* per-channel number of requests pending in channel h/w fifo */
-	atomic_t *submit_count;
-
-	/* per-channel request fifo */
-	struct talitos_request **fifo;
-
 	/*
 	 * length of the request fifo
 	 * fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
 	 */
 	unsigned int fifo_len;
 
-	/* per-channel index to next free descriptor request */
-	int *head;
-
-	/* per-channel index to next in-progress/done descriptor request */
-	int *tail;
+	struct talitos_channel *chan;
 
-	/* per-channel request submission (head) and release (tail) locks */
-	spinlock_t *head_lock;
-	spinlock_t *tail_lock;
+	/* next channel to be assigned next incoming descriptor */
+	atomic_t last_chan ____cacheline_aligned;
 
 	/* request callback tasklet */
 	struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
 
+static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
+{
+	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+	talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+}
+
 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
  */
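to_talitos_ptr() becomes the single place where a bus address is split across the descriptor pointer's 32-bit ptr field and the extended-address eptr field. A worked example with the kernel's lower_32_bits()/upper_32_bits() helpers, using a hypothetical 36-bit address:

	dma_addr_t dma = 0x987654321ULL;	/* hypothetical 36-bit bus address */
	u32 lo = lower_32_bits(dma);		/* 0x87654321 -> talitos_ptr->ptr  */
	u32 hi = upper_32_bits(dma);		/* 0x00000009 -> talitos_ptr->eptr */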
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
 				   unsigned char extent,
 				   enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
 	talitos_ptr->len = cpu_to_be16(len);
-	talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
+	to_talitos_ptr(talitos_ptr, dma_addr);
 	talitos_ptr->j_extent = extent;
 }
 
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
 		return -EIO;
 	}
 
-	/* set done writeback and IRQ */
-	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
-		  TALITOS_CCCR_LO_CDIE);
+	/* set 36-bit addressing, done writeback enable and done IRQ enable */
+	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 
 	/* and ICCR writeback, if available */
 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
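TALITOS_CCCR_LO_EAE (extended address enable) is what actually switches the channel into 36-bit mode; the done-writeback and done-IRQ enables are unchanged. setbits32() is a read-modify-write helper, so unrelated CCCR_LO bits survive; in the powerpc tree it is defined essentially as:

	/* read the register, OR in the requested bits, store back big-endian */
	#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) | (_v))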
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
 	/* emulate SEC's round-robin channel fifo polling scheme */
 	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
 
-	spin_lock_irqsave(&priv->head_lock[ch], flags);
+	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 
-	if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 		/* h/w fifo is full */
-		spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 		return -EAGAIN;
 	}
 
-	head = priv->head[ch];
-	request = &priv->fifo[ch][head];
+	head = priv->chan[ch].head;
+	request = &priv->chan[ch].fifo[head];
 
 	/* map descriptor and save caller data */
 	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
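The full-fifo test needs no explicit comparison against chfifo_len: submit_count starts at -(chfifo_len - 1) (see the probe changes below), and atomic_inc_not_zero() refuses to increment once the counter reaches zero. A sketch of the counting, assuming chfifo_len == 24:

	/* init:            atomic_set(&submit_count, -23);
	 * submits 1..23:   atomic_inc_not_zero() -> -22, ..., -1, 0 (all succeed)
	 * submit 24:       counter is 0, atomic_inc_not_zero() fails -> -EAGAIN
	 * each completion: atomic_dec() in flush_channel() reopens one slot */

so at most chfifo_len - 1 requests are ever outstanding per channel.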
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
 	request->context = context;
 
 	/* increment fifo head */
-	priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
+	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 
 	smp_wmb();
 	request->desc = desc;
 
 	/* GO! */
 	wmb();
-	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
+	out_be32(priv->reg + TALITOS_FF(ch),
+		 cpu_to_be32(upper_32_bits(request->dma_desc)));
+	out_be32(priv->reg + TALITOS_FF_LO(ch),
+		 cpu_to_be32(lower_32_bits(request->dma_desc)));
 
-	spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 
 	return -EINPROGRESS;
 }
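With 36-bit addressing the descriptor's bus address no longer fits in the single FETCH_FIFO_LO write: the upper bits go to TALITOS_FF first, then the lower 32 bits to TALITOS_FF_LO, presumably because the low-half write is what enqueues the fetch, so the high half must already be latched. On the big-endian cores this driver serves, out_be32() itself performs the big-endian store, so the cpu_to_be32() wrappers are identity conversions and the sequence is equivalent to:

	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
	out_be32(priv->reg + TALITOS_FF_LO(ch), lower_32_bits(request->dma_desc));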
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 	unsigned long flags;
 	int tail, status;
 
-	spin_lock_irqsave(&priv->tail_lock[ch], flags);
+	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 
-	tail = priv->tail[ch];
-	while (priv->fifo[ch][tail].desc) {
-		request = &priv->fifo[ch][tail];
+	tail = priv->chan[ch].tail;
+	while (priv->chan[ch].fifo[tail].desc) {
+		request = &priv->chan[ch].fifo[tail];
 
 		/* descriptors with their done bits set don't get the error */
 		rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 		request->desc = NULL;
 
 		/* increment fifo tail */
-		priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
+		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 
-		spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 
-		atomic_dec(&priv->submit_count[ch]);
+		atomic_dec(&priv->chan[ch].submit_count);
 
 		saved_req.callback(dev, saved_req.desc, saved_req.context,
 				   status);
 		/* channel may resume processing in single desc error case */
 		if (error && !reset_ch && status == error)
 			return;
-		spin_lock_irqsave(&priv->tail_lock[ch], flags);
-		tail = priv->tail[ch];
+		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+		tail = priv->chan[ch].tail;
 	}
 
-	spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 }
 
 /*
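Neither head nor tail is ever reduced modulo fifo_len; because probe() rounds chfifo_len up with roundup_pow_of_two(), masking with fifo_len - 1 wraps the index for free. For example, assuming fifo_len == 32 (mask 0x1f):

	tail = (31 + 1) & 31;	/* == 0: wraps from the last slot back to slot 0 */
	tail = (17 + 1) & 31;	/* == 18: an ordinary increment everywhere else */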
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
 static struct talitos_desc *current_desc(struct device *dev, int ch)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
-	int tail = priv->tail[ch];
+	int tail = priv->chan[ch].tail;
 	dma_addr_t cur_desc;
 
 	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
 
-	while (priv->fifo[ch][tail].dma_desc != cur_desc) {
+	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
 		tail = (tail + 1) & (priv->fifo_len - 1);
-		if (tail == priv->tail[ch]) {
+		if (tail == priv->chan[ch].tail) {
 			dev_err(dev, "couldn't locate current descriptor\n");
 			return NULL;
 		}
 	}
 
-	return priv->fifo[ch][tail].desc;
+	return priv->chan[ch].fifo[tail].desc;
 }
 
 /*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 	int n_sg = sg_count;
 
 	while (n_sg--) {
-		link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
+		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
 		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
 		link_tbl_ptr->j_extent = 0;
 		link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	struct talitos_desc *desc = &edesc->desc;
 	unsigned int cryptlen = areq->cryptlen;
 	unsigned int authsize = ctx->authsize;
-	unsigned int ivsize;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
 	int sg_count, ret;
 	int sg_link_tbl_len;
 
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
 			       0, DMA_TO_DEVICE);
 	/* hmac data */
-	map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
-			       sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
-			       DMA_TO_DEVICE);
+	map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
+			       sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
 	/* cipher iv */
-	ivsize = crypto_aead_ivsize(aead);
 	map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
 			       DMA_TO_DEVICE);
 
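The old length for the HMAC data pointer was the pointer difference sg_virt(areq->src) - sg_virt(areq->assoc), which silently assumed the associated data, IV and payload were virtually contiguous. Stating it as areq->assoclen + ivsize removes that assumption. A worked example for a hypothetical ESP request:

	/* 8 bytes of associated data (SPI + sequence number) plus a
	 * 16-byte AES-CBC IV: ptr[1] now maps 8 + 16 = 24 bytes,
	 * whatever the underlying scatterlist layout looks like. */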
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					 edesc->src_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
 	} else {
 		sg_link_tbl_len = cryptlen;
 
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
+			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 						   edesc->dma_len,
 						   DMA_BIDIRECTIONAL);
 		} else {
 			/* Only one segment now, so no link tbl needed */
-			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
-								      src));
+			to_talitos_ptr(&desc->ptr[4],
+				       sg_dma_address(areq->src));
 		}
 	}
 
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					 edesc->dst_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
 	} else {
 		struct talitos_ptr *link_tbl_ptr =
 			&edesc->link_tbl[edesc->src_nents + 1];
 
-		desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
-					       edesc->dma_link_tbl +
-					       edesc->src_nents + 1);
+		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+			       (edesc->src_nents + 1) *
+			       sizeof(struct talitos_ptr));
 		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
 					  link_tbl_ptr);
 
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		link_tbl_ptr->len = cpu_to_be16(authsize);
 
 		/* icv data follows link tables */
-		link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
-						edesc->dma_link_tbl +
-						edesc->src_nents +
-						edesc->dst_nents + 2);
-
+		to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
+			       (edesc->src_nents + edesc->dst_nents + 2) *
+			       sizeof(struct talitos_ptr));
 		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
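The old code obtained entry offsets by casting the dma_addr_t to a struct talitos_ptr * so that C pointer arithmetic would scale the index, then truncated the result through cpu_to_be32(); that both type-puns a bus address as a CPU pointer and drops any address bits above 32. Keeping the arithmetic in dma_addr_t with an explicit sizeof preserves all 36 bits. A sketch, with a hypothetical local:

	/* entry n of the link table lives n * sizeof(struct talitos_ptr)
	 * bytes past the table's bus address; e.g. with src_nents == 2 and
	 * dst_nents == 3 the ICV entry sits at index 2 + 3 + 2 == 7 */
	dma_addr_t icv_addr = edesc->dma_link_tbl +
			      (edesc->src_nents + edesc->dst_nents + 2) *
			      sizeof(struct talitos_ptr);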
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 
 	/* first DWORD empty */
 	desc->ptr[0].len = 0;
-	desc->ptr[0].ptr = 0;
+	to_talitos_ptr(&desc->ptr[0], 0);
 	desc->ptr[0].j_extent = 0;
 
 	/* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 					 edesc->src_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
 	} else {
 		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
+			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
 			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
-			desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 						   edesc->dma_len,
 						   DMA_BIDIRECTIONAL);
 		} else {
 			/* Only one segment now, so no link tbl needed */
-			desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
-								      src));
+			to_talitos_ptr(&desc->ptr[3],
+				       sg_dma_address(areq->src));
 		}
 	}
 
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 					 edesc->dst_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
 	} else {
 		struct talitos_ptr *link_tbl_ptr =
 			&edesc->link_tbl[edesc->src_nents + 1];
 
+		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+					      (edesc->src_nents + 1) *
+					      sizeof(struct talitos_ptr));
 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-		desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
-					       edesc->dma_link_tbl +
-					       edesc->src_nents + 1);
 		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
 					  link_tbl_ptr);
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 
 	/* last DWORD empty */
 	desc->ptr[6].len = 0;
-	desc->ptr[6].ptr = 0;
+	to_talitos_ptr(&desc->ptr[6], 0);
 	desc->ptr[6].j_extent = 0;
 
 	ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
 		talitos_unregister_rng(dev);
 
-	kfree(priv->submit_count);
-	kfree(priv->tail);
-	kfree(priv->head);
-
-	if (priv->fifo)
-		for (i = 0; i < priv->num_channels; i++)
-			kfree(priv->fifo[i]);
+	for (i = 0; i < priv->num_channels; i++)
+		if (priv->chan[i].fifo)
+			kfree(priv->chan[i].fifo);
 
-	kfree(priv->fifo);
-	kfree(priv->head_lock);
-	kfree(priv->tail_lock);
+	kfree(priv->chan);
 
 	if (priv->irq != NO_IRQ) {
 		free_irq(priv->irq, dev);
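Teardown now mirrors the single allocation: free each channel's request fifo, then the channel array itself. Since kfree(NULL) is defined to be a no-op, the inner NULL check is only belt and braces; a hypothetical minimal form would be:

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);
	kfree(priv->chan);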
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
 	if (of_device_is_compatible(np, "fsl,sec2.1"))
 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
 
-	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
-				  GFP_KERNEL);
-	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
-				  GFP_KERNEL);
-	if (!priv->head_lock || !priv->tail_lock) {
-		dev_err(dev, "failed to allocate fifo locks\n");
+	priv->chan = kzalloc(sizeof(struct talitos_channel) *
+			     priv->num_channels, GFP_KERNEL);
+	if (!priv->chan) {
+		dev_err(dev, "failed to allocate channel management space\n");
 		err = -ENOMEM;
 		goto err_out;
 	}
 
 	for (i = 0; i < priv->num_channels; i++) {
-		spin_lock_init(&priv->head_lock[i]);
-		spin_lock_init(&priv->tail_lock[i]);
-	}
-
-	priv->fifo = kmalloc(sizeof(struct talitos_request *) *
-			     priv->num_channels, GFP_KERNEL);
-	if (!priv->fifo) {
-		dev_err(dev, "failed to allocate request fifo\n");
-		err = -ENOMEM;
-		goto err_out;
+		spin_lock_init(&priv->chan[i].head_lock);
+		spin_lock_init(&priv->chan[i].tail_lock);
 	}
 
 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
 
 	for (i = 0; i < priv->num_channels; i++) {
-		priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
-					priv->fifo_len, GFP_KERNEL);
-		if (!priv->fifo[i]) {
+		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
+					     priv->fifo_len, GFP_KERNEL);
+		if (!priv->chan[i].fifo) {
 			dev_err(dev, "failed to allocate request fifo %d\n", i);
 			err = -ENOMEM;
 			goto err_out;
 		}
 	}
 
-	priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
-				     GFP_KERNEL);
-	if (!priv->submit_count) {
-		dev_err(dev, "failed to allocate fifo submit count space\n");
-		err = -ENOMEM;
-		goto err_out;
-	}
 	for (i = 0; i < priv->num_channels; i++)
-		atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));
+		atomic_set(&priv->chan[i].submit_count,
+			   -(priv->chfifo_len - 1));
 
-	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
-	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
-	if (!priv->head || !priv->tail) {
-		dev_err(dev, "failed to allocate request index space\n");
-		err = -ENOMEM;
-		goto err_out;
-	}
+	dma_set_mask(dev, DMA_BIT_MASK(36));
 
 	/* reset and initialize the h/w */
 	err = init_device(dev);
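dma_set_mask(dev, DMA_BIT_MASK(36)) tells the DMA API that the device can drive 36 address bits, so dma_map_single() may hand back bus addresses above 4 GiB. dma_set_mask() returns nonzero when the platform cannot honour the mask; the patch ignores that, but the conventional checked form is:

	if (dma_set_mask(dev, DMA_BIT_MASK(36)))
		dev_warn(dev, "unable to set 36-bit DMA mask\n");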