Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig    |  10
-rw-r--r--  drivers/dma/dmatest.c  |   7
-rw-r--r--  drivers/dma/fsldma.c   | 270
-rw-r--r--  drivers/dma/fsldma.h   |   1
-rw-r--r--  drivers/dma/ioat_dma.c |   2
5 files changed, 100 insertions(+), 190 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cd303901eb5b..904e57558bb5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -48,13 +48,13 @@ config DW_DMAC
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
 config FSL_DMA
-	bool "Freescale MPC85xx/MPC83xx DMA support"
-	depends on PPC
+	tristate "Freescale Elo and Elo Plus DMA support"
+	depends on FSL_SOC
 	select DMA_ENGINE
 	---help---
-	  Enable support for the Freescale DMA engine. Now, it support
-	  MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
-	  The MPC8349, MPC8360 is also supported.
+	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
+	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
+	  Elo Plus is the DMA controller on 85xx and 86xx parts.
 
 config MV_XOR
 	bool "Marvell XOR engine support"
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a08d19704743..d1e381e35a9e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -325,7 +325,12 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
 	struct dmatest_thread *thread;
 	unsigned int i;
 
-	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
+	/* Have we already been told about this channel? */
+	list_for_each_entry(dtc, &dmatest_channels, node)
+		if (dtc->chan == chan)
+			return DMA_DUP;
+
+	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 	if (!dtc) {
 		pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
 		return DMA_NAK;
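The dmatest.c hunk above guards against registering the same channel twice by walking the list of already-known channels before allocating a new tracking structure, and it switches the allocation to GFP_KERNEL because the call runs in a context that may sleep. The following standalone sketch illustrates the same check-before-allocate list pattern in plain userspace C; the names (tracked_chan, add_channel, and so on) are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the per-channel bookkeeping structure. */
struct tracked_chan {
	const void *chan;          /* identity of the underlying channel   */
	struct tracked_chan *next; /* singly linked list of known channels */
};

static struct tracked_chan *channels; /* head of the list */

/* Return 1 if the channel was already known (the "duplicate" case),
 * 0 if a new entry was added, -1 on allocation failure. */
static int add_channel(const void *chan)
{
	struct tracked_chan *tc;

	/* Have we already been told about this channel? */
	for (tc = channels; tc; tc = tc->next)
		if (tc->chan == chan)
			return 1;

	tc = malloc(sizeof(*tc));
	if (!tc)
		return -1;

	tc->chan = chan;
	tc->next = channels;
	channels = tc;
	return 0;
}

int main(void)
{
	int dummy;

	printf("first add:  %d\n", add_channel(&dummy)); /* 0: added     */
	printf("second add: %d\n", add_channel(&dummy)); /* 1: duplicate */
	return 0;
}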
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index c0059ca58340..0b95dcce447e 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -370,7 +370,10 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
 					struct dma_client *client)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-	LIST_HEAD(tmp_list);
+
+	/* Has this channel already been allocated? */
+	if (fsl_chan->desc_pool)
+		return 1;
 
 	/* We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
@@ -410,6 +413,8 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
 	}
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 	dma_pool_destroy(fsl_chan->desc_pool);
+
+	fsl_chan->desc_pool = NULL;
 }
 
 static struct dma_async_tx_descriptor *
@@ -786,159 +791,29 @@ static void dma_do_tasklet(unsigned long data)
 	fsl_chan_ld_cleanup(fsl_chan);
 }
 
-static void fsl_dma_callback_test(void *param)
-{
-	struct fsl_dma_chan *fsl_chan = param;
-	if (fsl_chan)
-		dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n");
-}
-
-static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
-{
-	struct dma_chan *chan;
-	int err = 0;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	u8 *src, *dest;
-	int i;
-	size_t test_size;
-	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;
-
-	test_size = 4096;
-
-	src = kmalloc(test_size * 2, GFP_KERNEL);
-	if (!src) {
-		dev_err(fsl_chan->dev,
-			"selftest: Cannot alloc memory for test!\n");
-		return -ENOMEM;
-	}
-
-	dest = src + test_size;
-
-	for (i = 0; i < test_size; i++)
-		src[i] = (u8) i;
-
-	chan = &fsl_chan->common;
-
-	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
-		dev_err(fsl_chan->dev,
-			"selftest: Cannot alloc resources for DMA\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* TX 1 */
-	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
-				 DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
-				  DMA_FROM_DEVICE);
-	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
-	async_tx_ack(tx1);
-
-	cookie = fsl_dma_tx_submit(tx1);
-	fsl_dma_memcpy_issue_pending(chan);
-	msleep(2);
-
-	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		dev_err(fsl_chan->dev, "selftest: Time out!\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* Test free and re-alloc channel resources */
-	fsl_dma_free_chan_resources(chan);
-
-	if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
-		dev_err(fsl_chan->dev,
-			"selftest: Cannot alloc resources for DMA\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	/* Continue to test
-	 * TX 2
-	 */
-	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
-			test_size / 4, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
-			test_size / 4, DMA_FROM_DEVICE);
-	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
-	async_tx_ack(tx2);
-
-	/* TX 3 */
-	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
-			test_size / 4, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
-			test_size / 4, DMA_FROM_DEVICE);
-	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
-	async_tx_ack(tx3);
-
-	/* Interrupt tx test */
-	tx1 = fsl_dma_prep_interrupt(chan, 0);
-	async_tx_ack(tx1);
-	cookie = fsl_dma_tx_submit(tx1);
-
-	/* Test exchanging the prepared tx sort */
-	cookie = fsl_dma_tx_submit(tx3);
-	cookie = fsl_dma_tx_submit(tx2);
-
-	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
-	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
-		tx3->callback = fsl_dma_callback_test;
-		tx3->callback_param = fsl_chan;
-	}
-	fsl_dma_memcpy_issue_pending(chan);
-	msleep(2);
-
-	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
-		dev_err(fsl_chan->dev, "selftest: Time out!\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	err = memcmp(src, dest, test_size);
-	if (err) {
-		for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
-				i++);
-		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
-				"error! src 0x%x, dest 0x%x\n",
-				i, (long)test_size, *(src + i), *(dest + i));
-	}
-
-free_resources:
-	fsl_dma_free_chan_resources(chan);
-out:
-	kfree(src);
-	return err;
-}
-
-static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
-			const struct of_device_id *match)
+static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+	struct device_node *node, u32 feature, const char *compatible)
 {
-	struct fsl_dma_device *fdev;
 	struct fsl_dma_chan *new_fsl_chan;
 	int err;
 
-	fdev = dev_get_drvdata(dev->dev.parent);
-	BUG_ON(!fdev);
-
 	/* alloc channel */
 	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
 	if (!new_fsl_chan) {
-		dev_err(&dev->dev, "No free memory for allocating "
+		dev_err(fdev->dev, "No free memory for allocating "
 				"dma channels!\n");
 		return -ENOMEM;
 	}
 
 	/* get dma channel register base */
-	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
+	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
 	if (err) {
-		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
-				dev->node->full_name);
+		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
+				node->full_name);
 		goto err_no_reg;
 	}
 
-	new_fsl_chan->feature = *(u32 *)match->data;
+	new_fsl_chan->feature = feature;
 
 	if (!fdev->feature)
 		fdev->feature = new_fsl_chan->feature;
@@ -948,13 +823,13 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &dev->dev;
+	new_fsl_chan->dev = &new_fsl_chan->common.dev;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
 	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
-		dev_err(&dev->dev, "There is no %d channel!\n",
+		dev_err(fdev->dev, "There is no %d channel!\n",
 			new_fsl_chan->id);
 		err = -EINVAL;
 		goto err_no_chan;
@@ -988,29 +863,23 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
 			&fdev->common.channels);
 	fdev->common.chancnt++;
 
-	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
+	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
 	if (new_fsl_chan->irq != NO_IRQ) {
 		err = request_irq(new_fsl_chan->irq,
 				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
 				"fsldma-channel", new_fsl_chan);
 		if (err) {
-			dev_err(&dev->dev, "DMA channel %s request_irq error "
-				"with return %d\n", dev->node->full_name, err);
+			dev_err(fdev->dev, "DMA channel %s request_irq error "
+				"with return %d\n", node->full_name, err);
 			goto err_no_irq;
 		}
 	}
 
-	err = fsl_dma_self_test(new_fsl_chan);
-	if (err)
-		goto err_self_test;
-
-	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
-		match->compatible, new_fsl_chan->irq);
+	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
+		compatible, new_fsl_chan->irq);
 
 	return 0;
 
-err_self_test:
-	free_irq(new_fsl_chan->irq, new_fsl_chan);
 err_no_irq:
 	list_del(&new_fsl_chan->common.device_node);
 err_no_chan:
@@ -1020,38 +889,20 @@ err_no_reg:
 	return err;
 }
 
-const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
-const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
-
-static struct of_device_id of_fsl_dma_chan_ids[] = {
-	{
-		.compatible = "fsl,eloplus-dma-channel",
-		.data = (void *)&mpc8540_dma_ip_feature,
-	},
-	{
-		.compatible = "fsl,elo-dma-channel",
-		.data = (void *)&mpc8349_dma_ip_feature,
-	},
-	{}
-};
-
-static struct of_platform_driver of_fsl_dma_chan_driver = {
-	.name = "of-fsl-dma-channel",
-	.match_table = of_fsl_dma_chan_ids,
-	.probe = of_fsl_dma_chan_probe,
-};
-
-static __init int of_fsl_dma_chan_init(void)
+static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 {
-	return of_register_platform_driver(&of_fsl_dma_chan_driver);
+	free_irq(fchan->irq, fchan);
+	list_del(&fchan->common.device_node);
+	iounmap(fchan->reg_base);
+	kfree(fchan);
 }
 
 static int __devinit of_fsl_dma_probe(struct of_device *dev,
 			const struct of_device_id *match)
 {
 	int err;
-	unsigned int irq;
 	struct fsl_dma_device *fdev;
+	struct device_node *child;
 
 	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
 	if (!fdev) {
@@ -1085,9 +936,9 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.dev = &dev->dev;
 
-	irq = irq_of_parse_and_map(dev->node, 0);
-	if (irq != NO_IRQ) {
-		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
+	fdev->irq = irq_of_parse_and_map(dev->node, 0);
+	if (fdev->irq != NO_IRQ) {
+		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
 			"fsldma-device", fdev);
 		if (err) {
 			dev_err(&dev->dev, "DMA device request_irq error "
@@ -1097,7 +948,21 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_set_drvdata(&(dev->dev), fdev);
-	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);
+
+	/* We cannot use of_platform_bus_probe() because there is no
+	 * of_platform_bus_remove.  Instead, we manually instantiate every DMA
+	 * channel object.
+	 */
+	for_each_child_of_node(dev->node, child) {
+		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+			fsl_dma_chan_probe(fdev, child,
+				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
+				"fsl,eloplus-dma-channel");
+		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+			fsl_dma_chan_probe(fdev, child,
+				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
+				"fsl,elo-dma-channel");
+	}
 
 	dma_async_device_register(&fdev->common);
 	return 0;
@@ -1109,6 +974,30 @@ err_no_reg:
 	return err;
 }
 
+static int of_fsl_dma_remove(struct of_device *of_dev)
+{
+	struct fsl_dma_device *fdev;
+	unsigned int i;
+
+	fdev = dev_get_drvdata(&of_dev->dev);
+
+	dma_async_device_unregister(&fdev->common);
+
+	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+		if (fdev->chan[i])
+			fsl_dma_chan_remove(fdev->chan[i]);
+
+	if (fdev->irq != NO_IRQ)
+		free_irq(fdev->irq, fdev);
+
+	iounmap(fdev->reg_base);
+
+	kfree(fdev);
+	dev_set_drvdata(&of_dev->dev, NULL);
+
+	return 0;
+}
+
 static struct of_device_id of_fsl_dma_ids[] = {
 	{ .compatible = "fsl,eloplus-dma", },
 	{ .compatible = "fsl,elo-dma", },
@@ -1116,15 +1005,32 @@ static struct of_device_id of_fsl_dma_ids[] = {
 };
 
 static struct of_platform_driver of_fsl_dma_driver = {
-	.name = "of-fsl-dma",
+	.name = "fsl-elo-dma",
 	.match_table = of_fsl_dma_ids,
 	.probe = of_fsl_dma_probe,
+	.remove = of_fsl_dma_remove,
 };
 
 static __init int of_fsl_dma_init(void)
 {
-	return of_register_platform_driver(&of_fsl_dma_driver);
+	int ret;
+
+	pr_info("Freescale Elo / Elo Plus DMA driver\n");
+
+	ret = of_register_platform_driver(&of_fsl_dma_driver);
+	if (ret)
+		pr_err("fsldma: failed to register platform driver\n");
+
+	return ret;
+}
+
+static void __exit of_fsl_dma_exit(void)
+{
+	of_unregister_platform_driver(&of_fsl_dma_driver);
 }
 
-subsys_initcall(of_fsl_dma_chan_init);
 subsys_initcall(of_fsl_dma_init);
+module_exit(of_fsl_dma_exit);
+
+MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
+MODULE_LICENSE("GPL");
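Inside the new probe path above, the per-channel of_device_id match table is replaced by explicit of_device_is_compatible() checks, so each child DMA-channel node is instantiated by hand and can be torn down symmetrically in the new remove routine. The standalone sketch below shows the same dispatch-on-compatible-string idea in plain C; the flag names, table, and chan_probe() are simplified placeholders for illustration, not the kernel's OF API.

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the controller feature flags. */
#define IP_85XX          0x1
#define IP_83XX          0x2
#define BIG_ENDIAN_DESC  0x4
#define LITTLE_ENDIAN_DESC 0x8

struct chan_type {
	const char *compatible;   /* device-tree compatible string */
	unsigned int feature;     /* feature word selected by it   */
};

static const struct chan_type chan_types[] = {
	{ "fsl,eloplus-dma-channel", IP_85XX | BIG_ENDIAN_DESC },
	{ "fsl,elo-dma-channel",     IP_83XX | LITTLE_ENDIAN_DESC },
};

/* Pretend "probe": pick the feature word from the compatible string. */
static int chan_probe(const char *compatible)
{
	size_t i;

	for (i = 0; i < sizeof(chan_types) / sizeof(chan_types[0]); i++) {
		if (!strcmp(compatible, chan_types[i].compatible)) {
			printf("%s -> feature 0x%x\n",
			       compatible, chan_types[i].feature);
			return 0;
		}
	}
	return -1; /* not a DMA channel node we recognize */
}

int main(void)
{
	chan_probe("fsl,eloplus-dma-channel");
	chan_probe("fsl,elo-dma-channel");
	return 0;
}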
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 6faf07ba0d0e..4f21a512d848 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -114,6 +114,7 @@ struct fsl_dma_device {
 	struct dma_device common;
 	struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
 	u32 feature;		/* The same as DMA channels */
+	int irq;		/* Channel IRQ */
 };
 
 /* Define macros for fsl_dma_chan->feature property */
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index bc8c6e3470ca..1ef68b315657 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -971,11 +971,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		return ioat1_dma_get_next_descriptor(ioat_chan);
-		break;
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
 		return ioat2_dma_get_next_descriptor(ioat_chan);
-		break;
 	}
 	return NULL;
 }
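The ioat_dma.c hunk only drops break statements that sit directly after a return and can therefore never execute. A minimal standalone C example of the same dead-code shape (the version numbers and helper names here are made up for illustration):

#include <stdio.h>

static const char *v1_next(void)  { return "v1 descriptor"; }
static const char *v23_next(void) { return "v2/v3 descriptor"; }

static const char *get_next_descriptor(int version)
{
	switch (version) {
	case 1:
		return v1_next();	/* a break placed here would be unreachable */
	case 2:
	case 3:
		return v23_next();	/* likewise */
	}
	return NULL;			/* unknown version */
}

int main(void)
{
	printf("%s\n", get_next_descriptor(1));
	printf("%s\n", get_next_descriptor(3));
	return 0;
}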