Diffstat (limited to 'drivers/dma')

 drivers/dma/dmaengine.c     | 17
 drivers/dma/dmatest.c       |  4
 drivers/dma/fsldma.c        | 71
 drivers/dma/ioat_dma.c      | 47
 drivers/dma/ipu/ipu_idmac.c |  7
 5 files changed, 92 insertions(+), 54 deletions(-)
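
In short: the dmaengine memcpy helpers now tell the driver how each buffer was mapped (DMA_COMPL_SRC/DEST_UNMAP_SINGLE), dmatest simplifies a channel-registration error path, fsldma collects several fixes (snoop bits on end-of-link descriptors, per-descriptor cookies, a descriptor leak on allocation failure, lock scope in the transfer-queue kick, address format strings, and a channel-ID bound), ioat consumes the new unmap-type flags, and ipu_idmac fixes a spurious-interrupt check plus two disable_irq() calls made from IRQ context.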
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 92438e9dacc..5a87384ea4f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -804,11 +804,14 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK |
+		DMA_COMPL_SRC_UNMAP_SINGLE |
+		DMA_COMPL_DEST_UNMAP_SINGLE;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -850,11 +853,12 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -898,12 +902,13 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
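
All three helpers follow the same pattern: the flags now record that a buffer was mapped with dma_map_single(), so the driver's cleanup path can undo each mapping with the matching unmap primitive instead of guessing (the ioat_dma.c hunk below is the first consumer). A minimal sketch of the consumer side; the desc fields here are hypothetical driver bookkeeping, not part of this patch:

	if (!(desc->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		/* Pick the unmap call that matches the original mapping. */
		if (desc->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src, desc->len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src, desc->len,
				       DMA_TO_DEVICE);
	}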
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a27c0fb1bc1..fb7da5141e9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -531,9 +531,7 @@ static int __init dmatest_init(void)
 		chan = dma_request_channel(mask, filter, NULL);
 		if (chan) {
 			err = dmatest_add_channel(chan);
-			if (err == 0)
-				continue;
-			else {
+			if (err) {
 				dma_release_channel(chan);
 				break; /* add_channel failed, punt */
 			}
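
Failure is now the only special case: dmatest_add_channel() returning nonzero releases the channel and stops the scan, while success simply falls through to the next loop iteration, dropping the redundant continue/else pair.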
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index da8a8ed9e41..f18d1bde043 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
 			struct fsl_desc_sw *desc)
 {
+	u64 snoop_bits;
+
+	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+		? FSL_DMA_SNEN : 0;
+
 	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
-		64);
+		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+		| snoop_bits, 64);
 }
 
 static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
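
set_ld_eol() now ORs FSL_DMA_SNEN into the next_ln_addr word on 83xx-class controllers along with the end-of-link marker, so the final link descriptor of a chain gets the same snoop treatment as the rest of the chain.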
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	desc->async_tx.cookie = cookie;
-	fsl_chan->common.cookie = desc->async_tx.cookie;
+	list_for_each_entry(desc, &tx->tx_list, node) {
+		cookie++;
+		if (cookie < 0)
+			cookie = 1;
 
-	append_ld_queue(fsl_chan, desc);
-	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+		desc->async_tx.cookie = cookie;
+	}
+
+	fsl_chan->common.cookie = cookie;
+	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+	struct list_head *list;
 	size_t copy;
-	LIST_HEAD(link_chain);
 
 	if (!chan)
 		return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		if (!new) {
 			dev_err(fsl_chan->dev,
 					"No free memory for link descriptor\n");
-			return NULL;
+			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
 
-	return first ? &first->async_tx : NULL;
+	return &first->async_tx;
+
+fail:
+	if (!first)
+		return NULL;
+
+	list = &first->async_tx.tx_list;
+	list_for_each_entry_safe_reverse(new, prev, list, node) {
+		list_del(&new->node);
+		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+	}
+
+	return NULL;
 }
 
 /**
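
The old error handling returned NULL from the middle of the loop and leaked every link descriptor already taken from the DMA pool. The new fail label walks the partially built chain in reverse with list_for_each_entry_safe_reverse(), unlinking each descriptor and handing it back to fsl_chan->desc_pool; the unused LIST_HEAD(link_chain) goes away in the same pass.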
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	dma_addr_t next_dest_addr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
 	if (!dma_is_idle(fsl_chan))
-		return;
+		goto out_unlock;
 
 	dma_halt(fsl_chan);
 
 	/* If there are some link descriptors
 	 * not transfered in queue. We need to start it.
 	 */
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	/* Find the first un-transfer desciptor */
 	for (ld_node = fsl_chan->ld_queue.next;
@@ -617,19 +638,20 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 			fsl_chan->common.cookie) == DMA_SUCCESS);
 		ld_node = ld_node->next);
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
-			(void *)next_dest_addr);
+		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+			(unsigned long long)next_dest_addr);
 		set_cdar(fsl_chan, next_dest_addr);
 		dma_start(fsl_chan);
 	} else {
 		set_cdar(fsl_chan, 0);
 		set_ndar(fsl_chan, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 }
 
 /**
@@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
-			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
+		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+			(unsigned long long)get_cdar(fsl_chan),
+			(unsigned long long)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
@@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
-	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
 		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
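
FSL_DMA_MAX_CHANS_PER_DEVICE is a count and channel IDs are zero-based, so an ID equal to the maximum is already out of range; the comparison becomes >= to catch it.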
@@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-			"controller at %p...\n",
-			match->compatible, (void *)fdev->reg.start);
+			"controller at 0x%llx...\n",
+			match->compatible, (unsigned long long)fdev->reg.start);
 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
 						- fdev->reg.start + 1);
 
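
The format-string fixes here and in the dev_dbg() hunks above print DMA and resource addresses as 0x%llx with an explicit unsigned long long cast instead of casting to void * for %p, which would truncate 64-bit physical addresses on 32-bit kernels (these PowerPC parts can have 36-bit physical addressing).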
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index e4fc33c1c32..a600fc0f796 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
 		device->common.chancnt--;
 	}
 #endif
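
This tracks an interface change: i7300_idle_platform_probe() now takes a third argument, and the call is updated so the #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL block keeps compiling.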
@@ -1063,22 +1063,31 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 static void
 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
 {
-	/*
-	 * yes we are unmapping both _page and _single
-	 * alloc'd regions with unmap_page. Is this
-	 * *really* that bad?
-	 */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		pci_unmap_page(ioat_chan->device->pdev,
-				pci_unmap_addr(desc, dst),
-				pci_unmap_len(desc, len),
-				PCI_DMA_FROMDEVICE);
-
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		pci_unmap_page(ioat_chan->device->pdev,
-				pci_unmap_addr(desc, src),
-				pci_unmap_len(desc, len),
-				PCI_DMA_TODEVICE);
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			pci_unmap_single(ioat_chan->device->pdev,
+					 pci_unmap_addr(desc, dst),
+					 pci_unmap_len(desc, len),
+					 PCI_DMA_FROMDEVICE);
+		else
+			pci_unmap_page(ioat_chan->device->pdev,
+				       pci_unmap_addr(desc, dst),
+				       pci_unmap_len(desc, len),
+				       PCI_DMA_FROMDEVICE);
+	}
+
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			pci_unmap_single(ioat_chan->device->pdev,
+					 pci_unmap_addr(desc, src),
+					 pci_unmap_len(desc, len),
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(ioat_chan->device->pdev,
+				       pci_unmap_addr(desc, src),
+				       pci_unmap_len(desc, len),
+				       PCI_DMA_TODEVICE);
+	}
 }
 
 /**
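
This is the consumer of the new flags, and it answers the question in the deleted comment: regions mapped with the _single API are now unmapped with pci_unmap_single() rather than unconditionally with pci_unmap_page(). The destination and source halves are symmetric.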
@@ -1363,6 +1372,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	int err = 0;
 	struct completion cmp;
 	unsigned long tmo;
+	unsigned long flags;
 
 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -1392,8 +1402,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 			      DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
 				  DMA_FROM_DEVICE);
+	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, 0);
+						   IOAT_TEST_SIZE, flags);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index e202a6ce557..9a5bc1a7389 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1272,7 +1272,8 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
 	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-		     ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
+		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
+		     !list_is_last(ichan->queue.next, &ichan->queue))) {
 		int i = 100;
 
 		/* This doesn't help. See comment in ipu_disable_channel() */
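
The suspicious-interrupt branch gains a third condition: a current-buffer/active-buffer match is only treated as bogus when more than one buffer is actually queued, which is what the !list_is_last(ichan->queue.next, &ichan->queue) test checks.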
@@ -1547,7 +1548,7 @@ static irqreturn_t ic_sof_irq(int irq, void *dev_id)
 	struct idmac_channel *ichan = dev_id;
 	printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
 	       irq, ichan->dma_chan.chan_id);
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	return IRQ_HANDLED;
 }
 
@@ -1556,7 +1557,7 @@ static irqreturn_t ic_eof_irq(int irq, void *dev_id)
 	struct idmac_channel *ichan = dev_id;
 	printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
 	       irq, ichan->dma_chan.chan_id);
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	return IRQ_HANDLED;
 }
 
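
Both handlers run in hard-IRQ context, where disable_irq() is unsafe: it synchronizes with any running handler for that line, so calling it from the handler itself means waiting for your own return. disable_irq_nosync() just masks the line and returns. A sketch of the hazard, with a hypothetical handler:

	static irqreturn_t bad_handler(int irq, void *dev_id)
	{
		/*
		 * disable_irq() waits for all running handlers of "irq"
		 * to finish, including this one, which cannot finish
		 * until the wait completes: a self-deadlock.
		 */
		disable_irq(irq);
		return IRQ_HANDLED;	/* never reached */
	}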