author     Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:43:00 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:43:00 -0400
commit     58c8649e0e25de511c4a66ce3fa38891e2ec4e9e
tree       edb87012a3e42a7bbaa26a1172442da6ea389632   /drivers/dma
parent     ae786624c27411c1d38823f640b39f3d97412d5a
ioat3: interrupt descriptor support
The async_tx API uses the DMA_INTERRUPT operation type to terminate a chain of issued operations with a callback routine.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
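For context, this is roughly how a dmaengine client ends a chain with such a callback. A minimal sketch, assuming a channel has already been acquired; terminate_chain() and chain_done() are hypothetical names, while device_prep_dma_interrupt(), tx_submit() and dma_async_issue_pending() are the generic dmaengine entry points this patch wires ioat3 into. In the async_tx layer itself, async_trigger_callback() is the helper built on this operation type.

```c
#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical callback: runs from the channel's completion path once the
 * previously issued operations have finished. */
static void chain_done(void *arg)
{
	complete(arg);
}

/* Illustrative sketch: append an interrupt descriptor to the end of a chain
 * so that chain_done() fires when everything issued before it completes. */
static int terminate_chain(struct dma_chan *chan, struct completion *cmp)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = chain_done;
	tx->callback_param = cmp;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* kick the channel so queued descriptors actually run */
	dma_async_issue_pending(chan);
	return 0;
}
```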
Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/ioat/dma_v3.c | 39
1 file changed, 38 insertions, 1 deletion
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index bb57491f3fb3..ff4afdc8e59b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -120,7 +120,8 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
 
 	switch (desc->hw->ctl_f.op) {
 	case IOAT_OP_COPY:
-		ioat_dma_unmap(chan, flags, len, desc->hw);
+		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
+			ioat_dma_unmap(chan, flags, len, desc->hw);
 		break;
 	case IOAT_OP_FILL: {
 		struct ioat_fill_descriptor *hw = desc->fill;
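Note on the hunk above: interrupt descriptors are hardware NULL descriptors that move no data. The prep routine added further down leaves src_addr and dst_addr at zero, so the completion path must not attempt to DMA-unmap them; that is what the ctl_f.null check in front of ioat_dma_unmap() guards against.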
@@ -804,6 +805,38 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 			      len, flags);
 }
 
+static struct dma_async_tx_descriptor *
+ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
+	u16 idx;
+
+	if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
+		desc = ioat2_get_ring_ent(ioat, idx);
+	else
+		return NULL;
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+
+	desc->txd.flags = flags;
+	desc->len = 1;
+
+	dump_desc_dbg(ioat, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
 static void __devinit ioat3_dma_test_callback(void *dma_async_param)
 {
 	struct completion *cmp = dma_async_param;
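Two things are worth noting about the new prep routine. First, the hw->ctl / hw->ctl_f pair is the usual idiom of exposing one hardware control word both as a raw integer (so hw->ctl = 0 clears every flag at once) and as named bitfields. A minimal sketch of that idiom follows; the field names are taken from the patch, but the widths and ordering are illustrative, not the real ioat descriptor format (that lives in the driver's hw.h):

```c
#include <linux/types.h>

/* Illustrative only: one 32-bit control word, viewable either as a raw
 * value (bulk clear) or as named single-bit flags (selective set). */
struct example_desc_ctl {
	union {
		u32 ctl;                        /* raw view: hw->ctl = 0 */
		struct {
			unsigned int int_en:1;      /* interrupt on completion */
			unsigned int null:1;        /* no data transfer */
			unsigned int compl_write:1; /* write completion status */
			unsigned int fence:1;       /* order against later descriptors */
			unsigned int rsvd:28;
		} ctl_f;                        /* bitfield view: hw->ctl_f.null = 1 */
	};
};
```

Second, as the in-code comment says, the routine returns with the ring lock still held so that descriptors reach the hardware in the order they were prepared; the lock is presumably dropped by the corresponding submit path.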
@@ -1098,6 +1131,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 	dma->device_is_tx_complete = ioat3_is_complete;
+
+	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
+
 	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 	if (cap & IOAT_CAP_FILL_BLOCK) {
 		dma_cap_set(DMA_MEMSET, dma->cap_mask);
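With the capability bit and prep hook registered in the probe path above, a generic dmaengine client can locate such a channel through the standard capability mask. A minimal sketch; the wrapper name is hypothetical, while dma_cap_zero(), dma_cap_set() and dma_request_channel() are the stock dmaengine interfaces, nothing ioat-specific:

```c
#include <linux/dmaengine.h>

/* Hedged sketch (not from this patch): request any channel that
 * advertises the DMA_INTERRUPT capability. */
static struct dma_chan *request_interrupt_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERRUPT, mask);

	/* NULL filter and parameter: accept the first matching channel */
	return dma_request_channel(mask, NULL, NULL);
}
```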