diff options
Diffstat (limited to 'drivers/dma/altera-msgdma.c')
-rw-r--r-- | drivers/dma/altera-msgdma.c | 41 |
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 32905d5606ac..55f9c62ee54b 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c | |||
@@ -212,11 +212,12 @@ struct msgdma_device { | |||
212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) | 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) |
213 | { | 213 | { |
214 | struct msgdma_sw_desc *desc; | 214 | struct msgdma_sw_desc *desc; |
215 | unsigned long flags; | ||
215 | 216 | ||
216 | spin_lock_bh(&mdev->lock); | 217 | spin_lock_irqsave(&mdev->lock, flags); |
217 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); | 218 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); |
218 | list_del(&desc->node); | 219 | list_del(&desc->node); |
219 | spin_unlock_bh(&mdev->lock); | 220 | spin_unlock_irqrestore(&mdev->lock, flags); |
220 | 221 | ||
221 | INIT_LIST_HEAD(&desc->tx_list); | 222 | INIT_LIST_HEAD(&desc->tx_list); |
222 | 223 | ||
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
306 | struct msgdma_device *mdev = to_mdev(tx->chan); | 307 | struct msgdma_device *mdev = to_mdev(tx->chan); |
307 | struct msgdma_sw_desc *new; | 308 | struct msgdma_sw_desc *new; |
308 | dma_cookie_t cookie; | 309 | dma_cookie_t cookie; |
310 | unsigned long flags; | ||
309 | 311 | ||
310 | new = tx_to_desc(tx); | 312 | new = tx_to_desc(tx); |
311 | spin_lock_bh(&mdev->lock); | 313 | spin_lock_irqsave(&mdev->lock, flags); |
312 | cookie = dma_cookie_assign(tx); | 314 | cookie = dma_cookie_assign(tx); |
313 | 315 | ||
314 | list_add_tail(&new->node, &mdev->pending_list); | 316 | list_add_tail(&new->node, &mdev->pending_list); |
315 | spin_unlock_bh(&mdev->lock); | 317 | spin_unlock_irqrestore(&mdev->lock, flags); |
316 | 318 | ||
317 | return cookie; | 319 | return cookie; |
318 | } | 320 | } |
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
336 | struct msgdma_extended_desc *desc; | 338 | struct msgdma_extended_desc *desc; |
337 | size_t copy; | 339 | size_t copy; |
338 | u32 desc_cnt; | 340 | u32 desc_cnt; |
341 | unsigned long irqflags; | ||
339 | 342 | ||
340 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); | 343 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); |
341 | 344 | ||
342 | spin_lock_bh(&mdev->lock); | 345 | spin_lock_irqsave(&mdev->lock, irqflags); |
343 | if (desc_cnt > mdev->desc_free_cnt) { | 346 | if (desc_cnt > mdev->desc_free_cnt) { |
344 | spin_unlock_bh(&mdev->lock); | 347 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
345 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 348 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
346 | return NULL; | 349 | return NULL; |
347 | } | 350 | } |
348 | mdev->desc_free_cnt -= desc_cnt; | 351 | mdev->desc_free_cnt -= desc_cnt; |
349 | spin_unlock_bh(&mdev->lock); | 352 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
350 | 353 | ||
351 | do { | 354 | do { |
352 | /* Allocate and populate the descriptor */ | 355 | /* Allocate and populate the descriptor */ |
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
397 | u32 desc_cnt = 0, i; | 400 | u32 desc_cnt = 0, i; |
398 | struct scatterlist *sg; | 401 | struct scatterlist *sg; |
399 | u32 stride; | 402 | u32 stride; |
403 | unsigned long irqflags; | ||
400 | 404 | ||
401 | for_each_sg(sgl, sg, sg_len, i) | 405 | for_each_sg(sgl, sg, sg_len, i) |
402 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); | 406 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); |
403 | 407 | ||
404 | spin_lock_bh(&mdev->lock); | 408 | spin_lock_irqsave(&mdev->lock, irqflags); |
405 | if (desc_cnt > mdev->desc_free_cnt) { | 409 | if (desc_cnt > mdev->desc_free_cnt) { |
406 | spin_unlock_bh(&mdev->lock); | 410 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
407 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 411 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
408 | return NULL; | 412 | return NULL; |
409 | } | 413 | } |
410 | mdev->desc_free_cnt -= desc_cnt; | 414 | mdev->desc_free_cnt -= desc_cnt; |
411 | spin_unlock_bh(&mdev->lock); | 415 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
412 | 416 | ||
413 | avail = sg_dma_len(sgl); | 417 | avail = sg_dma_len(sgl); |
414 | 418 | ||
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) | |||
566 | static void msgdma_issue_pending(struct dma_chan *chan) | 570 | static void msgdma_issue_pending(struct dma_chan *chan) |
567 | { | 571 | { |
568 | struct msgdma_device *mdev = to_mdev(chan); | 572 | struct msgdma_device *mdev = to_mdev(chan); |
573 | unsigned long flags; | ||
569 | 574 | ||
570 | spin_lock_bh(&mdev->lock); | 575 | spin_lock_irqsave(&mdev->lock, flags); |
571 | msgdma_start_transfer(mdev); | 576 | msgdma_start_transfer(mdev); |
572 | spin_unlock_bh(&mdev->lock); | 577 | spin_unlock_irqrestore(&mdev->lock, flags); |
573 | } | 578 | } |
574 | 579 | ||
575 | /** | 580 | /** |
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) | |||
634 | static void msgdma_free_chan_resources(struct dma_chan *dchan) | 639 | static void msgdma_free_chan_resources(struct dma_chan *dchan) |
635 | { | 640 | { |
636 | struct msgdma_device *mdev = to_mdev(dchan); | 641 | struct msgdma_device *mdev = to_mdev(dchan); |
642 | unsigned long flags; | ||
637 | 643 | ||
638 | spin_lock_bh(&mdev->lock); | 644 | spin_lock_irqsave(&mdev->lock, flags); |
639 | msgdma_free_descriptors(mdev); | 645 | msgdma_free_descriptors(mdev); |
640 | spin_unlock_bh(&mdev->lock); | 646 | spin_unlock_irqrestore(&mdev->lock, flags); |
641 | kfree(mdev->sw_desq); | 647 | kfree(mdev->sw_desq); |
642 | } | 648 | } |
643 | 649 | ||
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) | |||
682 | u32 count; | 688 | u32 count; |
683 | u32 __maybe_unused size; | 689 | u32 __maybe_unused size; |
684 | u32 __maybe_unused status; | 690 | u32 __maybe_unused status; |
691 | unsigned long flags; | ||
685 | 692 | ||
686 | spin_lock(&mdev->lock); | 693 | spin_lock_irqsave(&mdev->lock, flags); |
687 | 694 | ||
688 | /* Read number of responses that are available */ | 695 | /* Read number of responses that are available */ |
689 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); | 696 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); |
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data) | |||
698 | * bits. So we need to just drop these values. | 705 | * bits. So we need to just drop these values. |
699 | */ | 706 | */ |
700 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); | 707 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); |
701 | status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); | 708 | status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); |
702 | 709 | ||
703 | msgdma_complete_descriptor(mdev); | 710 | msgdma_complete_descriptor(mdev); |
704 | msgdma_chan_desc_cleanup(mdev); | 711 | msgdma_chan_desc_cleanup(mdev); |
705 | } | 712 | } |
706 | 713 | ||
707 | spin_unlock(&mdev->lock); | 714 | spin_unlock_irqrestore(&mdev->lock, flags); |
708 | } | 715 | } |
709 | 716 | ||
710 | /** | 717 | /** |