author		Vinod Koul <vkoul@kernel.org>	2018-10-24 04:16:18 -0400
committer	Vinod Koul <vkoul@kernel.org>	2018-10-24 04:16:18 -0400
commit		bfda9020870824c497c674901142f836c199dde9 (patch)
tree		1189a3d2d02d7589e3b4bfb1fedcd113d8d6f986
parent		4fa7393bda7b558f8ff546ebad51a445da86cd58 (diff)
parent		a3e403161fe1cc87bdbcfb8a4a4eab7aa6ac1a96 (diff)
Merge branch 'topic/owl' into for-linus
-rw-r--r--	drivers/dma/owl-dma.c	283
1 file changed, 276 insertions, 7 deletions
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 7812a6338acd..90bbcef99ef8 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/slab.h>
 #include "virt-dma.h"
 
@@ -161,10 +162,12 @@ struct owl_dma_lli {
  * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
  * @vd: virtual DMA descriptor
  * @lli_list: link list of lli nodes
+ * @cyclic: flag to indicate cyclic transfers
  */
 struct owl_dma_txd {
 	struct virt_dma_desc	vd;
 	struct list_head	lli_list;
+	bool			cyclic;
 };
 
 /**
@@ -186,11 +189,15 @@ struct owl_dma_pchan {
  * @vc: wrappped virtual channel
  * @pchan: the physical channel utilized by this channel
  * @txd: active transaction on this channel
+ * @cfg: slave configuration for this channel
+ * @drq: physical DMA request ID for this channel
  */
 struct owl_dma_vchan {
 	struct virt_dma_chan	vc;
 	struct owl_dma_pchan	*pchan;
 	struct owl_dma_txd	*txd;
+	struct dma_slave_config cfg;
+	u8			drq;
 };
 
 /**
@@ -200,6 +207,7 @@ struct owl_dma_vchan {
  * @clk: clock for the DMA controller
  * @lock: a lock to use when change DMA controller global register
  * @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt ID for the DMA controller
  * @nr_pchans: the number of physical channels
  * @pchans: array of data for the physical channels
  * @nr_vchans: the number of physical channels
@@ -336,9 +344,11 @@ static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
 
 static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
 					   struct owl_dma_lli *prev,
-					   struct owl_dma_lli *next)
+					   struct owl_dma_lli *next,
+					   bool is_cyclic)
 {
-	list_add_tail(&next->node, &txd->lli_list);
+	if (!is_cyclic)
+		list_add_tail(&next->node, &txd->lli_list);
 
 	if (prev) {
 		prev->hw.next_lli = next->phys;
@@ -351,7 +361,9 @@ static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
 static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				  struct owl_dma_lli *lli,
 				  dma_addr_t src, dma_addr_t dst,
-				  u32 len, enum dma_transfer_direction dir)
+				  u32 len, enum dma_transfer_direction dir,
+				  struct dma_slave_config *sconfig,
+				  bool is_cyclic)
 {
 	struct owl_dma_lli_hw *hw = &lli->hw;
 	u32 mode;
@@ -365,6 +377,32 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 			OWL_DMA_MODE_DAM_INC;
 
 		break;
+	case DMA_MEM_TO_DEV:
+		mode |= OWL_DMA_MODE_TS(vchan->drq)
+			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
+			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
+
+		/*
+		 * Hardware only supports 32bit and 8bit buswidth. Since the
+		 * default is 32bit, select 8bit only when requested.
+		 */
+		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+		break;
+	case DMA_DEV_TO_MEM:
+		mode |= OWL_DMA_MODE_TS(vchan->drq)
+			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
+			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
+
+		/*
+		 * Hardware only supports 32bit and 8bit buswidth. Since the
+		 * default is 32bit, select 8bit only when requested.
+		 */
+		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -381,7 +419,10 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				 OWL_DMA_LLC_SAV_LOAD_NEXT |
 				 OWL_DMA_LLC_DAV_LOAD_NEXT);
 
-	hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+	if (is_cyclic)
+		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
+	else
+		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
 
 	return 0;
 }
@@ -443,6 +484,16 @@ static void owl_dma_terminate_pchan(struct owl_dma *od,
 	spin_unlock_irqrestore(&od->lock, flags);
 }
 
+static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
+{
+	pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
+}
+
+static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
+{
+	pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
+}
+
 static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
 {
 	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
@@ -464,7 +515,10 @@ static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
 	lli = list_first_entry(&txd->lli_list,
 			       struct owl_dma_lli, node);
 
-	int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+	if (txd->cyclic)
+		int_ctl = OWL_DMA_INTCTL_BLOCK;
+	else
+		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
 
 	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
 	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
@@ -627,6 +681,54 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static int owl_dma_config(struct dma_chan *chan,
+			  struct dma_slave_config *config)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
+
+	return 0;
+}
+
+static int owl_dma_pause(struct dma_chan *chan)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	owl_dma_pause_pchan(vchan->pchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int owl_dma_resume(struct dma_chan *chan)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	unsigned long flags;
+
+	if (!vchan->pchan && !vchan->txd)
+		return 0;
+
+	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	owl_dma_resume_pchan(vchan->pchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
 static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
 {
 	struct owl_dma_pchan *pchan;
@@ -754,13 +856,14 @@ static struct dma_async_tx_descriptor
 		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
 
 		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
-				      bytes, DMA_MEM_TO_MEM);
+				      bytes, DMA_MEM_TO_MEM,
+				      &vchan->cfg, txd->cyclic);
 		if (ret) {
 			dev_warn(chan2dev(chan), "failed to config lli\n");
 			goto err_txd_free;
 		}
 
-		prev = owl_dma_add_lli(txd, prev, lli);
+		prev = owl_dma_add_lli(txd, prev, lli, false);
 	}
 
 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -770,6 +873,133 @@ err_txd_free:
 	return NULL;
 }
 
+static struct dma_async_tx_descriptor
+		*owl_dma_prep_slave_sg(struct dma_chan *chan,
+				       struct scatterlist *sgl,
+				       unsigned int sg_len,
+				       enum dma_transfer_direction dir,
+				       unsigned long flags, void *context)
+{
+	struct owl_dma *od = to_owl_dma(chan->device);
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct owl_dma_txd *txd;
+	struct owl_dma_lli *lli, *prev = NULL;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	size_t len;
+	int ret, i;
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	INIT_LIST_HEAD(&txd->lli_list);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+
+		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
+			dev_err(od->dma.dev,
+				"frame length exceeds max supported length");
+			goto err_txd_free;
+		}
+
+		lli = owl_dma_alloc_lli(od);
+		if (!lli) {
+			dev_err(chan2dev(chan), "failed to allocate lli");
+			goto err_txd_free;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = addr;
+			dst = sconfig->dst_addr;
+		} else {
+			src = sconfig->src_addr;
+			dst = addr;
+		}
+
+		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
+				      txd->cyclic);
+		if (ret) {
+			dev_warn(chan2dev(chan), "failed to config lli");
+			goto err_txd_free;
+		}
+
+		prev = owl_dma_add_lli(txd, prev, lli, false);
+	}
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+	owl_dma_free_txd(od, txd);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor
+		*owl_prep_dma_cyclic(struct dma_chan *chan,
+				     dma_addr_t buf_addr, size_t buf_len,
+				     size_t period_len,
+				     enum dma_transfer_direction dir,
+				     unsigned long flags)
+{
+	struct owl_dma *od = to_owl_dma(chan->device);
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct owl_dma_txd *txd;
+	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
+	dma_addr_t src = 0, dst = 0;
+	unsigned int periods = buf_len / period_len;
+	int ret, i;
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	INIT_LIST_HEAD(&txd->lli_list);
+	txd->cyclic = true;
+
+	for (i = 0; i < periods; i++) {
+		lli = owl_dma_alloc_lli(od);
+		if (!lli) {
+			dev_warn(chan2dev(chan), "failed to allocate lli");
+			goto err_txd_free;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = buf_addr + (period_len * i);
+			dst = sconfig->dst_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = sconfig->src_addr;
+			dst = buf_addr + (period_len * i);
+		}
+
+		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
+				      dir, sconfig, txd->cyclic);
+		if (ret) {
+			dev_warn(chan2dev(chan), "failed to config lli");
+			goto err_txd_free;
+		}
+
+		if (!first)
+			first = lli;
+
+		prev = owl_dma_add_lli(txd, prev, lli, false);
+	}
+
+	/* close the cyclic list */
+	owl_dma_add_lli(txd, prev, first, true);
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+	owl_dma_free_txd(od, txd);
+
+	return NULL;
+}
+
 static void owl_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
@@ -790,6 +1020,27 @@ static inline void owl_dma_free(struct owl_dma *od)
 	}
 }
 
+static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
+					 struct of_dma *ofdma)
+{
+	struct owl_dma *od = ofdma->of_dma_data;
+	struct owl_dma_vchan *vchan;
+	struct dma_chan *chan;
+	u8 drq = dma_spec->args[0];
+
+	if (drq > od->nr_vchans)
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&od->dma);
+	if (!chan)
+		return NULL;
+
+	vchan = to_owl_vchan(chan);
+	vchan->drq = drq;
+
+	return chan;
+}
+
 static int owl_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -833,12 +1084,19 @@ static int owl_dma_probe(struct platform_device *pdev)
 	spin_lock_init(&od->lock);
 
 	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
 
 	od->dma.dev = &pdev->dev;
 	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
 	od->dma.device_tx_status = owl_dma_tx_status;
 	od->dma.device_issue_pending = owl_dma_issue_pending;
 	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
+	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
+	od->dma.device_config = owl_dma_config;
+	od->dma.device_pause = owl_dma_pause;
+	od->dma.device_resume = owl_dma_resume;
 	od->dma.device_terminate_all = owl_dma_terminate_all;
 	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -910,8 +1168,18 @@ static int owl_dma_probe(struct platform_device *pdev)
 		goto err_pool_free;
 	}
 
+	/* Device-tree DMA controller registration */
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 owl_dma_of_xlate, od);
+	if (ret) {
+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+		goto err_dma_unregister;
+	}
+
 	return 0;
 
+err_dma_unregister:
+	dma_async_device_unregister(&od->dma);
 err_pool_free:
 	clk_disable_unprepare(od->clk);
 	dma_pool_destroy(od->lli_pool);
@@ -923,6 +1191,7 @@ static int owl_dma_remove(struct platform_device *pdev)
 {
 	struct owl_dma *od = platform_get_drvdata(pdev);
 
+	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&od->dma);
 
 	/* Mask all interrupts for this execution environment */
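
For context, the sketch below is not part of the patch above; it is a minimal consumer-side example of how a peripheral driver would exercise the new slave support through the generic dmaengine API, which in turn invokes owl_dma_config(), owl_dma_prep_slave_sg() and owl_dma_issue_pending() registered here. The "tx" channel name, device pointer and FIFO address are illustrative assumptions.

/*
 * Consumer-side sketch (assumed names): request the channel that
 * owl_dma_of_xlate() routes from the client's "dmas" DT property,
 * configure it, and start a single mem-to-dev transfer.
 */
#include <linux/dmaengine.h>

static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* "tx" must match an entry in the client's dma-names property */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Ends up in owl_dma_config(), which stores cfg in the vchan */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out_release;

	/* Wraps the buffer in a one-entry sg list -> owl_dma_prep_slave_sg() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto out_release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* -> owl_dma_issue_pending() */

	return 0;

out_release:
	dma_release_channel(chan);
	return ret;
}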