author		Jun Nie <jun.nie@linaro.org>		2015-07-10 08:02:49 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2015-07-31 11:03:43 -0400
commit		8c8fe97b2b8a216523e2faf1ccca66ddab634e3e (patch)
tree		31cb28e7335a1e29a1f86f1e982955a3336318f4
parent		0ec9ebc706fbd394bc233d87ac7aaad1c4f3ab54 (diff)
Revert "dmaengine: virt-dma: don't always free descriptor upon completion"
This reverts commit b9855f03d560d351e95301b9de0bc3cad3b31fe9.
The reverted patch breaks an existing DMA use case: for example, the
audio SoC dmaengine never releases its channel, which causes virt-dma
to cache so much memory in descriptors that it exhausts system memory.
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
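As an illustration of the failure mode (the foo_* names below are
hypothetical, not from this patch): an ASoC-style client requests its DMA
channel once and then submits a descriptor per period for the lifetime of
the device, so vchan_free_chan_resources() -- the only point at which the
reverted code reclaimed descriptors parked on desc_allocated -- is never
reached. A minimal sketch:

#include <linux/dmaengine.h>

/* Hypothetical audio-style submit path; sgl/sg_len describe one period. */
static void foo_pcm_submit_period(struct dma_chan *chan,
				  struct scatterlist *sgl,
				  unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	/*
	 * The channel stays allocated for the device's lifetime and
	 * dma_release_channel() is never called, so with the reverted
	 * patch each completed descriptor was parked on desc_allocated
	 * instead of freed: memory grew by one descriptor per transfer.
	 */
}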
-rw-r--r--	drivers/dma/virt-dma.c	| 19 ++++++-------------
-rw-r--r--	drivers/dma/virt-dma.h	| 13 +------------
2 files changed, 7 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	list_move_tail(&vd->node, &vc->desc_submitted);
+	list_add_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-		if (async_tx_test_ack(&vd->tx))
-			list_add(&vd->node, &vc->desc_allocated);
-		else
-			vc->desc_free(vd);
+
+		vc->desc_free(vd);
 
 		if (cb)
 			cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		if (async_tx_test_ack(&vd->tx)) {
-			list_move_tail(&vd->node, &vc->desc_allocated);
-		} else {
-			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-			list_del(&vd->node);
-			vc->desc_free(vd);
-		}
+		list_del(&vd->node);
+		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+		vc->desc_free(vd);
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
-	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
-	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
 
-	spin_lock_irqsave(&vc->lock, flags);
-	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
-
 	return &vd->tx;
 }
 
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- * descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
 *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
-	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
-	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
-	list_for_each_entry(vd, &head, node)
-		async_tx_clear_ack(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
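With the revert applied, vchan_complete() and vchan_dma_desc_free_list()
hand every descriptor straight to vc->desc_free(), so a client driver's
free callback is once again the single teardown point. A minimal sketch
of how such a driver wires it up (the foo_* names are hypothetical; the
pattern follows vchan_init() as declared above):

#include <linux/slab.h>
#include "virt-dma.h"

struct foo_desc {
	struct virt_dma_desc vd;	/* embeds the virt-dma descriptor */
	/* hardware-specific fields would follow */
};

struct foo_chan {
	struct virt_dma_chan vc;
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	/* Called from the vchan tasklet for every completed descriptor. */
	kfree(container_of(vd, struct foo_desc, vd));
}

static void foo_chan_setup(struct foo_chan *fc, struct dma_device *dmadev)
{
	fc->vc.desc_free = foo_desc_free;	/* hook up before vchan_init() */
	vchan_init(&fc->vc, dmadev);
}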