author    Russell King <rmk+kernel@arm.linux.org.uk>  2014-04-03 19:33:49 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2014-04-03 19:33:49 -0400
commit    bce5669be3a8946952258a064ef26defeb887138 (patch)
tree      117386b9909882c000f822011c5ea6fdcbab3273 /drivers/dma
parent    95959e6a06720834fc80a210e37898341c63cb91 (diff)
parent    566b60c04ab230b8cc3845f964306f99504b18df (diff)

Merge branch 'devel-stable' into for-next
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig        1
-rw-r--r--  drivers/dma/imx-sdma.c     1
-rw-r--r--  drivers/dma/ioat/dma.c    52
-rw-r--r--  drivers/dma/ioat/dma.h     1
-rw-r--r--  drivers/dma/ioat/dma_v2.c 11
-rw-r--r--  drivers/dma/ioat/dma_v3.c  3
-rw-r--r--  drivers/dma/mv_xor.c      24
-rw-r--r--  drivers/dma/ste_dma40.c    4
8 files changed, 71 insertions(+), 26 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a1..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -346,6 +346,7 @@ config MOXART_DMA
 	tristate "MOXART DMA support"
 	depends on ARCH_MOXART
 	select DMA_ENGINE
+	select DMA_OF
 	select DMA_VIRTUAL_CHANNELS
 	help
 	  Enable support for the MOXA ART SoC DMA controller.
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4e7918339b12..19041cefabb1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -449,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = {
 	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181efcc..4e3549a16132 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
 		chan = ioat_chan_by_index(instance, bit);
-		tasklet_schedule(&chan->cleanup_task);
+		if (test_bit(IOAT_RUN, &chan->state))
+			tasklet_schedule(&chan->cleanup_task);
 	}
 
 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 {
 	struct ioat_chan_common *chan = data;
 
-	tasklet_schedule(&chan->cleanup_task);
+	if (test_bit(IOAT_RUN, &chan->state))
+		tasklet_schedule(&chan->cleanup_task);
 
 	return IRQ_HANDLED;
 }
@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
 	chan->timer.function = device->timer_fn;
 	chan->timer.data = data;
 	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
-	tasklet_disable(&chan->cleanup_task);
 }
 
 /**
@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
 	writel(((u64) chan->completion_dma) >> 32,
 	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
-	tasklet_enable(&chan->cleanup_task);
+	set_bit(IOAT_RUN, &chan->state);
 	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
 	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
 		__func__, ioat->desccount);
 	return ioat->desccount;
 }
 
+void ioat_stop(struct ioat_chan_common *chan)
+{
+	struct ioatdma_device *device = chan->device;
+	struct pci_dev *pdev = device->pdev;
+	int chan_id = chan_num(chan);
+	struct msix_entry *msix;
+
+	/* 1/ stop irq from firing tasklets
+	 * 2/ stop the tasklet from re-arming irqs
+	 */
+	clear_bit(IOAT_RUN, &chan->state);
+
+	/* flush inflight interrupts */
+	switch (device->irq_mode) {
+	case IOAT_MSIX:
+		msix = &device->msix_entries[chan_id];
+		synchronize_irq(msix->vector);
+		break;
+	case IOAT_MSI:
+	case IOAT_INTX:
+		synchronize_irq(pdev->irq);
+		break;
+	default:
+		break;
+	}
+
+	/* flush inflight timers */
+	del_timer_sync(&chan->timer);
+
+	/* flush inflight tasklet runs */
+	tasklet_kill(&chan->cleanup_task);
+
+	/* final cleanup now that everything is quiesced and can't re-arm */
+	device->cleanup_fn((unsigned long) &chan->common);
+}
+
 /**
  * ioat1_dma_free_chan_resources - release all the descriptors
  * @chan: the channel to be cleaned
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
 	if (ioat->desccount == 0)
 		return;
 
-	tasklet_disable(&chan->cleanup_task);
-	del_timer_sync(&chan->timer);
-	ioat1_cleanup(ioat);
+	ioat_stop(chan);
 
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
 	 * before removing DMA descriptor resources.
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 static void ioat1_cleanup_event(unsigned long data)
 {
 	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
 
 	ioat1_cleanup(ioat);
+	if (!test_bit(IOAT_RUN, &chan->state))
+		return;
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
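The IOAT hunks above (and the matching dma_v2/dma_v3 hunks below) replace the old tasklet_disable()/tasklet_enable() pairing with an IOAT_RUN bit plus an explicit quiesce in the new ioat_stop(). The ordering there is the general kernel pattern for tearing down interrupt-driven deferred work; a minimal sketch under assumed names (struct my_chan, my_chan_stop() and the MY_RUN bit are illustrative, the APIs are real):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/timer.h>

#define MY_RUN 0	/* bit in ->state, analogous to IOAT_RUN */

struct my_chan {
	unsigned long state;
	struct tasklet_struct cleanup_task;
	struct timer_list timer;
	unsigned int irq;
};

static void my_chan_stop(struct my_chan *c)
{
	/* 1/ stop new work: the irq handler and the tasklet both
	 * test_bit(MY_RUN) before queueing anything, as the hunks
	 * above add, so clearing the bit first closes the re-arm race
	 */
	clear_bit(MY_RUN, &c->state);

	/* 2/ wait out a handler that sampled the bit just before it
	 * was cleared and may still be executing
	 */
	synchronize_irq(c->irq);

	/* 3/ flush already-queued deferred work; it may run once more
	 * but cannot re-arm after step 1
	 */
	del_timer_sync(&c->timer);
	tasklet_kill(&c->cleanup_task);
}

Doing the flushes before clearing the bit would leave a window in which a still-live interrupt re-schedules the freshly killed tasklet — which appears to be the window the removed tasklet_disable() pairing left open.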
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877ddca9..e982f00a9843 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
 extern const struct sysfs_ops ioat_sysfs_ops;
 extern struct ioat_sysfs_entry ioat_version_attr;
 extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe7e976..8d1058085eeb 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 void ioat2_cleanup_event(unsigned long data)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
 
 	ioat2_cleanup(ioat);
+	if (!test_bit(IOAT_RUN, &chan->state))
+		return;
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	ioat->issued = 0;
 	ioat->tail = 0;
 	ioat->alloc_order = order;
+	set_bit(IOAT_RUN, &chan->state);
 	spin_unlock_bh(&ioat->prep_lock);
 	spin_unlock_bh(&chan->cleanup_lock);
 
-	tasklet_enable(&chan->cleanup_task);
 	ioat2_start_null_desc(ioat);
 
 	/* check that we got off the ground */
@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
 	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
 
 	if (is_ioat_active(status) || is_ioat_idle(status)) {
-		set_bit(IOAT_RUN, &chan->state);
 		return 1 << ioat->alloc_order;
 	} else {
 		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
 	if (!ioat->ring)
 		return;
 
-	tasklet_disable(&chan->cleanup_task);
-	del_timer_sync(&chan->timer);
-	device->cleanup_fn((unsigned long) c);
+	ioat_stop(chan);
 	device->reset_hw(chan);
-	clear_bit(IOAT_RUN, &chan->state);
 
 	spin_lock_bh(&chan->cleanup_lock);
 	spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e97e62..b9b38a1cf92f 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 static void ioat3_cleanup_event(unsigned long data)
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
 
 	ioat3_cleanup(ioat);
+	if (!test_bit(IOAT_RUN, &chan->state))
+		return;
 	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (!mv_can_chain(grp_start))
 		goto submit_done;
 
-	dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
-		old_chain_tail->async_tx.phys);
+	dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+		&old_chain_tail->async_tx.phys);
 
 	/* fix up the hardware chain */
 	mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
 /* returns the number of allocated descriptors */
 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
-	char *hw_desc;
+	void *virt_desc;
+	dma_addr_t dma_desc;
 	int idx;
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
542 " %d descriptor slots", idx); 543 " %d descriptor slots", idx);
543 break; 544 break;
544 } 545 }
545 hw_desc = (char *) mv_chan->dma_desc_pool_virt; 546 virt_desc = mv_chan->dma_desc_pool_virt;
546 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 547 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
547 548
548 dma_async_tx_descriptor_init(&slot->async_tx, chan); 549 dma_async_tx_descriptor_init(&slot->async_tx, chan);
549 slot->async_tx.tx_submit = mv_xor_tx_submit; 550 slot->async_tx.tx_submit = mv_xor_tx_submit;
550 INIT_LIST_HEAD(&slot->chain_node); 551 INIT_LIST_HEAD(&slot->chain_node);
551 INIT_LIST_HEAD(&slot->slot_node); 552 INIT_LIST_HEAD(&slot->slot_node);
552 INIT_LIST_HEAD(&slot->tx_list); 553 INIT_LIST_HEAD(&slot->tx_list);
553 hw_desc = (char *) mv_chan->dma_desc_pool; 554 dma_desc = mv_chan->dma_desc_pool;
554 slot->async_tx.phys = 555 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
555 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
556 slot->idx = idx++; 556 slot->idx = idx++;
557 557
558 spin_lock_bh(&mv_chan->lock); 558 spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	int slot_cnt;
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x src %x len: %u flags: %ld\n",
-		__func__, dest, src, len, flags);
+		"%s dest: %pad src %pad len: %u flags: %ld\n",
+		__func__, &dest, &src, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
-		__func__, src_cnt, len, dest, flags);
+		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+		__func__, src_cnt, len, &dest, flags);
 
 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
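Two conversions run through the mv_xor hunks: descriptor address math drops the char * casts in favor of plain void-pointer arithmetic (a GCC extension the kernel builds with), and the debug prints move from %x to %pa/%pad, the printk specifiers that take a pointer to a phys_addr_t/dma_addr_t and print it at the type's real width — dma_addr_t can be 64 bits even on a 32-bit kernel (e.g. ARM with LPAE), where %x would truncate or trip format warnings. A small illustrative use (show_desc_addr() is a made-up helper; %pad itself is the documented specifier):

#include <linux/device.h>
#include <linux/types.h>

static void show_desc_addr(struct device *dev, dma_addr_t addr)
{
	/* pass a pointer, not the value: %pad dereferences it and
	 * prints the full dma_addr_t, no cast needed
	 */
	dev_dbg(dev, "desc at %pad\n", &addr);
}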
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 00a2de957b23..bf18c786ed40 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data)
 	struct d40_chan *d40c = (struct d40_chan *) data;
 	struct d40_desc *d40d;
 	unsigned long flags;
+	bool callback_active;
 	dma_async_tx_callback callback;
 	void *callback_param;
 
@@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data)
 	}
 
 	/* Callback to client */
+	callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
 	callback = d40d->txd.callback;
 	callback_param = d40d->txd.callback_param;
 
@@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data)
 
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
-	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+	if (callback_active && callback)
 		callback(callback_param);
 
 	return;
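The ste_dma40 change is a snapshot-under-lock fix: d40d may be freed or recycled once d40c->lock is dropped, so the DMA_PREP_INTERRUPT test has to be copied into a local (callback_active) while the lock is still held, next to the callback pointer and its argument, instead of dereferencing d40d->txd after the unlock. A self-contained sketch of the same shape (complete_desc() is hypothetical; the dmaengine types are real):

#include <linux/dmaengine.h>
#include <linux/spinlock.h>

static void complete_desc(spinlock_t *lock,
			  struct dma_async_tx_descriptor *txd)
{
	dma_async_tx_callback callback;
	void *callback_param;
	bool callback_active;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* copy out everything the unlocked call needs while the
	 * descriptor is still guaranteed alive
	 */
	callback_active = !!(txd->flags & DMA_PREP_INTERRUPT);
	callback = txd->callback;
	callback_param = txd->callback_param;
	spin_unlock_irqrestore(lock, flags);
	/* txd may be freed or reused from here on; touch locals only */

	if (callback_active && callback)
		callback(callback_param);
}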