 drivers/crypto/atmel-aes.c | 210 ++++++++++++++++++++++----------------------
 1 file changed, 108 insertions(+), 102 deletions(-)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 208fa8dce7f7..e964cb03cca5 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -166,6 +166,7 @@ static struct atmel_aes_drv atmel_aes = {
 	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
 };
 
+/* Shared functions */
 
 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
 {
@@ -302,6 +303,38 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 	return err;
 }
 
+static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+				 const u32 *iv)
+{
+	u32 valmr = 0;
+
+	/* MR register must be set before IV registers */
+	if (dd->ctx->keylen == AES_KEYSIZE_128)
+		valmr |= AES_MR_KEYSIZE_128;
+	else if (dd->ctx->keylen == AES_KEYSIZE_192)
+		valmr |= AES_MR_KEYSIZE_192;
+	else
+		valmr |= AES_MR_KEYSIZE_256;
+
+	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
+
+	if (use_dma) {
+		valmr |= AES_MR_SMOD_IDATAR0;
+		if (dd->caps.has_dualbuff)
+			valmr |= AES_MR_DUALBUFF;
+	} else {
+		valmr |= AES_MR_SMOD_AUTO;
+	}
+
+	atmel_aes_write(dd, AES_MR, valmr);
+
+	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
+			  SIZE_IN_WORDS(dd->ctx->keylen));
+
+	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
+		atmel_aes_write_block(dd, AES_IVR(0), iv);
+}
+
 
 /* CPU transfer */
 
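Aside (not part of the patch): the comment inside atmel_aes_write_ctrl() pins down the hardware programming order, MR first (key size, opmode, SMOD), then the key words, then the IV, which is skipped for ECB since that mode takes no IV. A minimal caller sketch follows; only atmel_aes_write_ctrl() and struct atmel_aes_dev come from this diff, everything else is an assumption for illustration. The hunk below removes the old copy of the function from its previous location.

/*
 * Hypothetical caller sketch: all names except atmel_aes_write_ctrl()
 * and struct atmel_aes_dev are assumed, not taken from this patch.
 */
static int example_start_request(struct atmel_aes_dev *dd,
				 const u32 *iv, bool use_dma)
{
	/*
	 * Program MR before KEYWR and IVR; the helper enforces that
	 * order internally and omits the IV write in ECB mode.
	 */
	atmel_aes_write_ctrl(dd, use_dma, iv);

	/* ...then start the CPU or DMA transfer for the request. */
	return 0;
}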
@@ -661,38 +694,6 @@ static void atmel_aes_dma_callback(void *data)
 	(void)dd->resume(dd);
 }
 
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
-				 const u32 *iv)
-{
-	u32 valmr = 0;
-
-	/* MR register must be set before IV registers */
-	if (dd->ctx->keylen == AES_KEYSIZE_128)
-		valmr |= AES_MR_KEYSIZE_128;
-	else if (dd->ctx->keylen == AES_KEYSIZE_192)
-		valmr |= AES_MR_KEYSIZE_192;
-	else
-		valmr |= AES_MR_KEYSIZE_256;
-
-	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
-
-	if (use_dma) {
-		valmr |= AES_MR_SMOD_IDATAR0;
-		if (dd->caps.has_dualbuff)
-			valmr |= AES_MR_DUALBUFF;
-	} else {
-		valmr |= AES_MR_SMOD_AUTO;
-	}
-
-	atmel_aes_write(dd, AES_MR, valmr);
-
-	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
-			  SIZE_IN_WORDS(dd->ctx->keylen));
-
-	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
-		atmel_aes_write_block(dd, AES_IVR(0), iv);
-}
-
 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 				  struct crypto_async_request *new_areq)
 {
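Aside: the trailing context above introduces atmel_aes_handle_queue(), whose body falls outside the hunk. As a rough sketch of the queue pattern such drivers use, built on the real crypto_enqueue_request()/crypto_dequeue_request() API from include/crypto/algapi.h; dd->lock, dd->queue, and AES_FLAGS_BUSY are assumptions, and this is simplified, not the file's exact body:

/*
 * Sketch of the common crypto-driver queue pattern (assumed,
 * simplified; not the exact body of atmel_aes_handle_queue()).
 */
static int example_handle_queue(struct atmel_aes_dev *dd,
				struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);

	/* If the engine is already busy, leave the request queued. */
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;		/* typically -EINPROGRESS or -EBUSY */
	}

	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	/* ...start processing areq here, then return ret. */
	return ret;
}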
@@ -730,6 +731,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
 	return (dd->is_async) ? ret : err;
 }
 
+
+/* AES async block ciphers */
+
 static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
 {
 	return atmel_aes_complete(dd, 0);
@@ -758,26 +762,6 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
 				   atmel_aes_transfer_complete);
 }
 
-
-static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
-{
-	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
-	dd->buflen = ATMEL_AES_BUFFER_SIZE;
-	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
-
-	if (!dd->buf) {
-		dev_err(dd->dev, "unable to alloc pages.\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
-{
-	free_page((unsigned long)dd->buf);
-}
-
 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct atmel_aes_base_ctx *ctx;
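Aside: atmel_aes_crypt() takes the cipher mode as a flag word (the same bits atmel_aes_write_ctrl() later masks with AES_FLAGS_MODE_MASK), so the per-mode entry points registered in the crypto_alg tables reduce to thin wrappers. A sketch of that shape; the exact flag names here are assumptions:

/* Sketch of per-mode wrappers (flag names assumed for illustration). */
static int example_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int example_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB);
}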
@@ -817,56 +801,6 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 	return atmel_aes_handle_queue(dd, &req->base);
 }
 
-static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
-{
-	struct at_dma_slave *sl = slave;
-
-	if (sl && sl->dma_dev == chan->device->dev) {
-		chan->private = sl;
-		return true;
-	} else {
-		return false;
-	}
-}
-
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
-			      struct crypto_platform_data *pdata)
-{
-	struct at_dma_slave *slave;
-	int err = -ENOMEM;
-	dma_cap_mask_t mask;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	/* Try to grab 2 DMA channels */
-	slave = &pdata->dma_slave->rxdata;
-	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
-							slave, dd->dev, "tx");
-	if (!dd->src.chan)
-		goto err_dma_in;
-
-	slave = &pdata->dma_slave->txdata;
-	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
-							slave, dd->dev, "rx");
-	if (!dd->dst.chan)
-		goto err_dma_out;
-
-	return 0;
-
-err_dma_out:
-	dma_release_channel(dd->src.chan);
-err_dma_in:
-	dev_warn(dd->dev, "no DMA channel available\n");
-	return err;
-}
-
-static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
-{
-	dma_release_channel(dd->dst.chan);
-	dma_release_channel(dd->src.chan);
-}
-
 static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
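Aside: the context above introduces atmel_aes_setkey(), whose body is outside the hunk. An AES setkey for this kind of driver typically just validates the length against the three AES key sizes and caches the key for atmel_aes_write_ctrl() to program into KEYWR later; a sketch under those assumptions, not the file's exact body:

/* Sketch of a typical AES setkey (assumed; not the exact body). */
static int example_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* Cache the key; atmel_aes_write_ctrl() writes it to KEYWR later. */
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}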
@@ -1181,6 +1115,78 @@ static struct crypto_alg aes_cfb64_alg = {
 	}
 };
 
+
+/* Probe functions */
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
+	dd->buflen = ATMEL_AES_BUFFER_SIZE;
+	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+	if (!dd->buf) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+	free_page((unsigned long)dd->buf);
+}
+
+static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave *sl = slave;
+
+	if (sl && sl->dma_dev == chan->device->dev) {
+		chan->private = sl;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+			      struct crypto_platform_data *pdata)
+{
+	struct at_dma_slave *slave;
+	int err = -ENOMEM;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Try to grab 2 DMA channels */
+	slave = &pdata->dma_slave->rxdata;
+	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
+							slave, dd->dev, "tx");
+	if (!dd->src.chan)
+		goto err_dma_in;
+
+	slave = &pdata->dma_slave->txdata;
+	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
+							slave, dd->dev, "rx");
+	if (!dd->dst.chan)
+		goto err_dma_out;
+
+	return 0;
+
+err_dma_out:
+	dma_release_channel(dd->src.chan);
+err_dma_in:
+	dev_warn(dd->dev, "no DMA channel available\n");
+	return err;
+}
+
+static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
+{
+	dma_release_channel(dd->dst.chan);
+	dma_release_channel(dd->src.chan);
+}
+
 static void atmel_aes_queue_task(unsigned long data)
 {
 	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
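Aside: one detail in the moved atmel_aes_buff_init() is worth spelling out. AES_BLOCK_SIZE is 16, a power of two, so clearing the low four bits rounds the buffer length down to a whole number of AES blocks:

/*
 * Rounding a length down to a multiple of AES_BLOCK_SIZE (16 bytes);
 * example_round_to_block is an illustrative name, not driver code.
 */
static inline size_t example_round_to_block(size_t len)
{
	return len & ~(size_t)(AES_BLOCK_SIZE - 1);	/* 4100 -> 4096 */
}

Also worth a second look while the code is being moved: the buffer comes from __get_free_pages(..., ATMEL_AES_BUFFER_ORDER) but is released with free_page(), which frees a single page; if the order is non-zero, the matching release would be free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER).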