author	Vinod Koul <vinod.koul@intel.com>	2016-01-06 04:47:16 -0500
committer	Vinod Koul <vinod.koul@intel.com>	2016-01-06 04:47:16 -0500
commit	5eec94388db40ce45bec028af2e2f62df751c887 (patch)
tree	42d34b08e2a64fc2fc4d6405756c1e75449ca276
parent	0c328de77148ddccaa7a2c31f5751e4d443c213b (diff)
parent	d3651b8e5cdf8773a7d74839e53454e4a0d48ffe (diff)
Merge branch 'topic/desc_reuse' into for-linus
-rw-r--r--	drivers/dma/dmaengine.c	1
-rw-r--r--	drivers/dma/pxa_dma.c	1
-rw-r--r--	drivers/dma/virt-dma.c	46
-rw-r--r--	drivers/dma/virt-dma.h	12
-rw-r--r--	include/linux/dmaengine.h	2
5 files changed, 56 insertions, 6 deletions
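This merge brings in the descriptor reuse work: a client may mark a transfer
with DMA_CTRL_REUSE so that, once it completes, the same descriptor can be
resubmitted instead of being freed and re-prepared. A minimal sketch of how a
slave client might drive this, assuming an already-requested channel with its
slave config set and a DMA-mapped buffer; the function names start_reusable_xfer
and xfer_done are hypothetical and error handling is abbreviated:

#include <linux/dmaengine.h>

static void xfer_done(void *param);

static int start_reusable_xfer(struct dma_chan *chan, dma_addr_t buf,
			       size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Fails if the driver did not advertise descriptor_reuse */
	if (dmaengine_desc_set_reuse(tx))
		return -EPERM;

	tx->callback = xfer_done;
	tx->callback_param = tx;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}

static void xfer_done(void *param)
{
	struct dma_async_tx_descriptor *tx = param;

	/* A completed reusable descriptor may simply be resubmitted... */
	dmaengine_submit(tx);
	dma_async_issue_pending(tx->chan);

	/* ...or released for good with dmaengine_desc_free(tx). */
}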
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ecec1445adf..4aced6689734 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -493,6 +493,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
 	caps->residue_granularity = device->residue_granularity;
+	caps->descriptor_reuse = device->descriptor_reuse;
 
 	/*
 	 * Some devices implement only pause (e.g. to get residuum) but no
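With the capability exported through dma_get_slave_caps(), a client can probe
for support before relying on reuse; this is also the check that
dmaengine_desc_set_reuse() performs internally. A minimal sketch, with a
hypothetical helper name:

#include <linux/dmaengine.h>

/* Sketch: returns true if @chan advertises descriptor reuse. */
static bool chan_supports_reuse(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;
	return caps.descriptor_reuse;
}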
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index fc4156afa070..f2a0310ae771 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1414,6 +1414,7 @@ static int pxad_probe(struct platform_device *op)
 	pdev->slave.dst_addr_widths = widths;
 	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
 	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	pdev->slave.descriptor_reuse = true;
 
 	pdev->slave.dev = &op->dev;
 	ret = pxad_init_dmadev(op, pdev, dma_channels);
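For a driver built on virt-dma, as pxa_dma is, the opt-in is this single flag;
the list bookkeeping below handles the rest. A sketch of the pattern for a
hypothetical driver "foo" (the flag is only honest if the driver's descriptors
really tolerate living on after completion):

#include <linux/dmaengine.h>

/* Sketch: driver-side opt-in for a hypothetical virt-dma based driver. */
static void foo_setup_dma_device(struct dma_device *dd)
{
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dd->descriptor_reuse = true;	/* virt-dma does the bookkeeping */
}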
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 6f80432a3f0a..a35c211857dd 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_add_tail(&vd->node, &vc->desc_submitted);
+	list_move_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(vchan_tx_submit);
 
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way to free it is to clear the DMA_CTRL_REUSE flag and submit the
+ * transfer one last time.
+ *
+ * Returns 0 upon success.
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+	struct virt_dma_desc *vd = to_virt_desc(tx);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vc->lock, flags);
+	list_del(&vd->node);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+		vc, vd, vd->tx.cookie);
+	vc->desc_free(vd);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
 struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
 	dma_cookie_t cookie)
 {
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-
-		vc->desc_free(vd);
+		if (dmaengine_desc_test_reuse(&vd->tx))
+			list_add(&vd->node, &vc->desc_allocated);
+		else
+			vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		list_del(&vd->node);
-		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-		vc->desc_free(vd);
+		if (dmaengine_desc_test_reuse(&vd->tx)) {
+			list_move_tail(&vd->node, &vc->desc_allocated);
+		} else {
+			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+			list_del(&vd->node);
+			vc->desc_free(vd);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
+	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
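Taken together, the virt-dma changes give descriptors a fifth lifecycle state:
they sit on desc_allocated from prep until submit, and on completion a
reusable descriptor returns to desc_allocated instead of being freed. The
driver's desc_free callback still does the actual freeing, but only from
vchan_tx_desc_free() or the non-reuse paths above. A sketch of such a callback
for a hypothetical driver whose descriptor type "foo_desc" embeds
virt_dma_desc:

#include <linux/slab.h>
#include "virt-dma.h"

struct foo_desc {
	struct virt_dma_desc vd;	/* embedded virt-dma descriptor */
	void *hw_lli;			/* hypothetical hardware link list */
};

/* Sketch: never called behind the driver's back while resubmission
 * of a reusable descriptor is still possible. */
static void foo_desc_free(struct virt_dma_desc *vd)
{
	struct foo_desc *d = container_of(vd, struct foo_desc, vd);

	kfree(d->hw_lli);
	kfree(d);
}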
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 2fa47745a41f..bff8c39dd716 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
+	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+	extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
+	vd->tx.desc_free = vchan_tx_desc_free;
+
+	spin_lock_irqsave(&vc->lock, flags);
+	list_add_tail(&vd->node, &vc->desc_allocated);
+	spin_unlock_irqrestore(&vc->lock, flags);
 
 	return &vd->tx;
 }
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
+	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -141,11 +150,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
+	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
+	list_for_each_entry(vd, &head, node)
+		dmaengine_desc_clear_reuse(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
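Clearing DMA_CTRL_REUSE before handing the list to
vchan_dma_desc_free_list() is what guarantees channel teardown really frees
everything, including reusable descriptors parked on desc_allocated. A driver
built on virt-dma can therefore just delegate; a sketch with hypothetical
"foo" names:

#include "virt-dma.h"

/* Sketch: a virt-dma driver's free_chan_resources simply delegates. */
static void foo_free_chan_resources(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	vchan_free_chan_resources(vc);
}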
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c47c68e535e8..6f94b5cbd97c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -659,6 +659,7 @@ enum dmaengine_alignment {
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
  */
 struct dma_device {
 
@@ -681,6 +682,7 @@ struct dma_device {
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
 	u32 directions;
+	bool descriptor_reuse;
 	enum dma_residue_granularity residue_granularity;
 
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
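This diff only adds the capability flag; the client-facing helpers built on
DMA_CTRL_REUSE come from the same topic branch elsewhere in
include/linux/dmaengine.h. Their approximate shape, reproduced here for
reference (check the tree for the authoritative versions):

static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
	struct dma_slave_caps caps;
	int ret;

	/* Only allow reuse if the channel's device advertised it */
	ret = dma_get_slave_caps(tx->chan, &caps);
	if (ret)
		return ret;
	if (!caps.descriptor_reuse)
		return -EPERM;

	tx->flags |= DMA_CTRL_REUSE;
	return 0;
}

static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
{
	tx->flags &= ~DMA_CTRL_REUSE;
}

static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
{
	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
}

static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
	/* Routes to the driver's desc_free, e.g. vchan_tx_desc_free() above */
	if (dmaengine_desc_test_reuse(desc))
		return desc->desc_free(desc);
	return -EPERM;
}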