author    | Kim Phillips <kim.phillips@freescale.com> | 2009-08-12 21:51:51 -0400
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2009-08-12 21:51:51 -0400
commit    | 81eb024c7e63f53b871797f6e2defccfd008dcd4 (patch)
tree      | 80abaf1e025cd192ba44645f778ac0ebd5569c86 /drivers/crypto/talitos.c
parent    | 4b992628812137e30cda3510510cf3c052345b30 (diff)
crypto: talitos - add support for 36 bit addressing
Enabling extended addressing in the h/w requires that we always assign the
extended address component (eptr) of the talitos h/w pointer. This is
needed for e500-based platforms with large memories.
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
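For illustration, the split the commit message describes is simply: the low 32 bits of a DMA handle go into the descriptor's pointer word, and the bits above 32 (bits 32-35 on a 36-bit bus) go into the extended pointer (eptr). A minimal standalone sketch follows, using a simplified stand-in for the driver's struct talitos_ptr and omitting the cpu_to_be32() byte-swapping the big-endian hardware actually requires:

```c
#include <stdint.h>

/* Simplified stand-in for the driver's struct talitos_ptr (the real one is
 * big-endian and also carries len and j_extent fields); illustration only. */
struct hw_ptr {
	uint32_t eptr;	/* extended address: bits 32-35 of a 36-bit bus address */
	uint32_t ptr;	/* low 32 bits of the bus address */
};

/* Mirrors what to_talitos_ptr() does in the patch, minus the endian swaps. */
static void set_hw_ptr(struct hw_ptr *p, uint64_t dma_addr)
{
	p->ptr  = (uint32_t)dma_addr;		/* lower_32_bits(dma_addr) */
	p->eptr = (uint32_t)(dma_addr >> 32);	/* upper_32_bits(dma_addr) */
}
```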
Diffstat (limited to 'drivers/crypto/talitos.c')
-rw-r--r-- | drivers/crypto/talitos.c | 69
1 file changed, 40 insertions(+), 29 deletions(-)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 5013a2dd47ed..c47ffe8a73ef 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -146,6 +146,12 @@ struct talitos_private {
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
 
+static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
+{
+	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+	talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+}
+
 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
  */
@@ -155,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
 				   unsigned char extent,
 				   enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
 	talitos_ptr->len = cpu_to_be16(len);
-	talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
+	to_talitos_ptr(talitos_ptr, dma_addr);
 	talitos_ptr->j_extent = extent;
 }
 
@@ -187,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
 		return -EIO;
 	}
 
-	/* set done writeback and IRQ */
-	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
-		  TALITOS_CCCR_LO_CDIE);
+	/* set 36-bit addressing, done writeback enable and done IRQ enable */
+	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 
 	/* and ICCR writeback, if available */
 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -312,7 +320,10 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
 
 	/* GO! */
 	wmb();
-	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
+	out_be32(priv->reg + TALITOS_FF(ch),
+		 cpu_to_be32(upper_32_bits(request->dma_desc)));
+	out_be32(priv->reg + TALITOS_FF_LO(ch),
+		 cpu_to_be32(lower_32_bits(request->dma_desc)));
 
 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 
@@ -934,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
 	int n_sg = sg_count;
 
 	while (n_sg--) {
-		link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
+		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
 		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
 		link_tbl_ptr->j_extent = 0;
 		link_tbl_ptr++;
@@ -1009,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 				  edesc->src_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
 	} else {
 		sg_link_tbl_len = cryptlen;
 
@@ -1020,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
+			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 						   edesc->dma_len,
 						   DMA_BIDIRECTIONAL);
 		} else {
 			/* Only one segment now, so no link tbl needed */
-			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
-								      src));
+			to_talitos_ptr(&desc->ptr[4],
+				       sg_dma_address(areq->src));
 		}
 	}
 
@@ -1042,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 				  edesc->dst_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
 	} else {
 		struct talitos_ptr *link_tbl_ptr =
 			&edesc->link_tbl[edesc->src_nents + 1];
 
-		desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
-					       edesc->dma_link_tbl +
-					       edesc->src_nents + 1);
+		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+			       (edesc->src_nents + 1) *
+			       sizeof(struct talitos_ptr));
 		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
 					  link_tbl_ptr);
 
@@ -1062,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		link_tbl_ptr->len = cpu_to_be16(authsize);
 
 		/* icv data follows link tables */
-		link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
-						edesc->dma_link_tbl +
-						edesc->src_nents +
-						edesc->dst_nents + 2);
-
+		to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
+			       (edesc->src_nents + edesc->dst_nents + 2) *
+			       sizeof(struct talitos_ptr));
 		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1341,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 
 	/* first DWORD empty */
 	desc->ptr[0].len = 0;
-	desc->ptr[0].ptr = 0;
+	to_talitos_ptr(&desc->ptr[0], 0);
 	desc->ptr[0].j_extent = 0;
 
 	/* cipher iv */
@@ -1365,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 				  edesc->src_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
 	} else {
 		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
+			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
 			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
-			desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 						   edesc->dma_len,
 						   DMA_BIDIRECTIONAL);
 		} else {
 			/* Only one segment now, so no link tbl needed */
-			desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
-								      src));
+			to_talitos_ptr(&desc->ptr[3],
+				       sg_dma_address(areq->src));
 		}
 	}
 
@@ -1393,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 				  edesc->dst_is_chained);
 
 	if (sg_count == 1) {
-		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
 	} else {
 		struct talitos_ptr *link_tbl_ptr =
 			&edesc->link_tbl[edesc->src_nents + 1];
 
+		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+			       (edesc->src_nents + 1) *
+			       sizeof(struct talitos_ptr));
 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-		desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
-					       edesc->dma_link_tbl +
-					       edesc->src_nents + 1);
 		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
 					  link_tbl_ptr);
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1414,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 
 	/* last DWORD empty */
 	desc->ptr[6].len = 0;
-	desc->ptr[6].ptr = 0;
+	to_talitos_ptr(&desc->ptr[6], 0);
 	desc->ptr[6].j_extent = 0;
 
 	ret = talitos_submit(dev, desc, callback, areq);
@@ -1898,6 +1907,8 @@ static int talitos_probe(struct of_device *ofdev,
 		atomic_set(&priv->chan[i].submit_count,
 			   -(priv->chfifo_len - 1));
 
+	dma_set_mask(dev, DMA_BIT_MASK(36));
+
 	/* reset and initialize the h/w */
 	err = init_device(dev);
 	if (err) {
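One usage note on the probe-time hunk above: dma_set_mask() returns an int and can fail if the platform cannot satisfy the requested mask; this commit calls it unchecked. A minimal sketch of the more defensive pattern, shown purely as an illustration and not part of the patch:

```c
/* Illustration only -- the commit itself ignores the dma_set_mask() return.
 * Announce 36-bit DMA addressing; fall back to 32-bit if it is rejected. */
if (dma_set_mask(dev, DMA_BIT_MASK(36))) {
	dev_warn(dev, "36-bit DMA mask rejected, falling back to 32-bit\n");
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -ENODEV;
}
```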