author     Peter Ujfalusi <peter.ujfalusi@ti.com>    2015-11-16 06:09:03 -0500
committer  Vinod Koul <vinod.koul@intel.com>         2015-12-05 05:06:32 -0500
commit     27bc944ca39ff1ed69bc48a38dc057e15ea3d1c0 (patch)
tree       1f720f5e564845062cd9eeb911ad427d95b37453 /drivers/dma/bcm2835-dma.c
parent     ef10b0b24143238c4457e0e60ec230b0fcc342a4 (diff)
dmaengine: bcm2835-dma: Convert to use DMA pool
Commit f93178291712 ("dmaengine: bcm2835-dma: Fix memory leak when
stopping a running transfer") fixed the memleak, but introduced another
issue: the terminate_all callback might be called with interrupts
disabled, and dma_free_coherent() is not allowed to be called when IRQs
are disabled.

Convert the driver to use the dma_pool_* API for managing the list of
control blocks for the transfer.
Fixes: f93178291712 ("dmaengine: bcm2835-dma: Fix memory leak when stopping a running transfer")
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Tested-by: Matthias Reichl <hias@horus.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
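
For context, the dma_pool_* lifecycle the driver moves to looks roughly
like the sketch below. This is a minimal illustration, not code from the
patch: the helper names (cb_pool_setup, cb_get, cb_put) are invented, and
it assumes the driver's struct bcm2835_dma_cb. The point of the conversion
is that dma_pool_free(), unlike dma_free_coherent(), may be called from
atomic context, i.e. with IRQs disabled.

#include <linux/device.h>
#include <linux/dmapool.h>

/* Illustrative helpers only; names are not from the patch. */
static struct dma_pool *cb_pool_setup(struct device *dev)
{
	/* One pool per channel; each entry holds one HW control block. */
	return dma_pool_create(dev_name(dev), dev,
			       sizeof(struct bcm2835_dma_cb), 0, 0);
}

static struct bcm2835_dma_cb *cb_get(struct dma_pool *pool, dma_addr_t *paddr)
{
	/* Zeroed allocation; GFP_ATOMIC works where sleeping is forbidden. */
	return dma_pool_zalloc(pool, GFP_ATOMIC, paddr);
}

static void cb_put(struct dma_pool *pool, struct bcm2835_dma_cb *cb,
		   dma_addr_t paddr)
{
	/* Safe with interrupts disabled, unlike dma_free_coherent(). */
	dma_pool_free(pool, cb, paddr);
}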
Diffstat (limited to 'drivers/dma/bcm2835-dma.c')
-rw-r--r--  drivers/dma/bcm2835-dma.c  78
1 file changed, 54 insertions(+), 24 deletions(-)
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index c92d6a70ccf3..996c4b00d323 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -31,6 +31,7 @@
  */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
 	uint32_t pad[2];
 };
 
+struct bcm2835_cb_entry {
+	struct bcm2835_dma_cb *cb;
+	dma_addr_t paddr;
+};
+
 struct bcm2835_chan {
 	struct virt_dma_chan vc;
 	struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
 
 	int ch;
 	struct bcm2835_desc *desc;
+	struct dma_pool *cb_pool;
 
 	void __iomem *chan_base;
 	int irq_number;
 };
 
 struct bcm2835_desc {
+	struct bcm2835_chan *c;
 	struct virt_dma_desc vd;
 	enum dma_transfer_direction dir;
 
-	unsigned int control_block_size;
-	struct bcm2835_dma_cb *control_block_base;
-	dma_addr_t control_block_base_phys;
+	struct bcm2835_cb_entry *cb_list;
 
 	unsigned int frames;
 	size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
 static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
 {
 	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-	dma_free_coherent(desc->vd.tx.chan->device->dev,
-			desc->control_block_size,
-			desc->control_block_base,
-			desc->control_block_base_phys);
+	int i;
+
+	for (i = 0; i < desc->frames; i++)
+		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+			      desc->cb_list[i].paddr);
+
+	kfree(desc->cb_list);
 	kfree(desc);
 }
 
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
 
 	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
 
-	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
 	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
 }
 
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct device *dev = c->vc.chan.device->dev;
+
+	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
 
-	dev_dbg(c->vc.chan.device->dev,
-			"Allocating DMA channel %d\n", c->ch);
+	c->cb_pool = dma_pool_create(dev_name(dev), dev,
+				     sizeof(struct bcm2835_dma_cb), 0, 0);
+	if (!c->cb_pool) {
+		dev_err(dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}
 
 	return request_irq(c->irq_number,
 			bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
 
 	vchan_free_chan_resources(&c->vc);
 	free_irq(c->irq_number, c);
+	dma_pool_destroy(c->cb_pool);
 
 	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
 }
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
 	size_t size;
 
 	for (size = i = 0; i < d->frames; i++) {
-		struct bcm2835_dma_cb *control_block =
-			&d->control_block_base[i];
+		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
 		size_t this_size = control_block->length;
 		dma_addr_t dma;
 
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	dma_addr_t dev_addr;
 	unsigned int es, sync_type;
 	unsigned int frame;
+	int i;
 
 	/* Grab configuration */
 	if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	if (!d)
 		return NULL;
 
+	d->c = c;
 	d->dir = direction;
 	d->frames = buf_len / period_len;
 
-	/* Allocate memory for control blocks */
-	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
-	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
-			d->control_block_size, &d->control_block_base_phys,
-			GFP_NOWAIT);
-
-	if (!d->control_block_base) {
+	d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+	if (!d->cb_list) {
 		kfree(d);
 		return NULL;
 	}
+	/* Allocate memory for control blocks */
+	for (i = 0; i < d->frames; i++) {
+		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+		cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+					       &cb_entry->paddr);
+		if (!cb_entry->cb)
+			goto error_cb;
+	}
 
 	/*
 	 * Iterate over all frames, create a control block
 	 * for each frame and link them together.
 	 */
 	for (frame = 0; frame < d->frames; frame++) {
-		struct bcm2835_dma_cb *control_block =
-			&d->control_block_base[frame];
+		struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
 
 		/* Setup adresses */
 		if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 		 * This DMA engine driver currently only supports cyclic DMA.
 		 * Therefore, wrap around at number of frames.
 		 */
-		control_block->next = d->control_block_base_phys +
-			sizeof(struct bcm2835_dma_cb)
-			* ((frame + 1) % d->frames);
+		control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
 	}
 
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+	i--;
+	for (; i >= 0; i--) {
+		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+		dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+	}
+
+	kfree(d->cb_list);
+	kfree(d);
+	return NULL;
 }
 
 static int bcm2835_dma_slave_config(struct dma_chan *chan,
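
Tying the diff back to the motivation: after this change,
bcm2835_dma_desc_free() only calls dma_pool_free() and kfree(), both legal
in atomic context, so descriptors can be freed from a terminate_all
callback that runs with interrupts disabled. Below is a condensed,
hypothetical sketch of such a path using the virt-dma helpers the driver
builds on; the driver's real callback also halts the hardware and updates
its channel bookkeeping.

static int example_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	/* Collect all queued descriptors while holding the channel lock. */
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	/*
	 * Each descriptor is released via bcm2835_dma_desc_free(), which
	 * now uses dma_pool_free() + kfree() instead of dma_free_coherent().
	 */
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}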