aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2009-09-08 20:43:00 -0400
committerDan Williams <dan.j.williams@intel.com>2009-09-08 20:43:00 -0400
commitae786624c27411c1d38823f640b39f3d97412d5a (patch)
tree87ca33dae521c2c5622ea67dde97611e77d77df8 /drivers
parentd69d235b7da2778891640ee95efcd68075978904 (diff)
ioat3: support xor via pq descriptors
If a platform advertises pq capabilities, but not xor, then use ioat3_prep_pqxor and ioat3_prep_pqxor_val to simulate xor support.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/dma/ioat/dma_v3.c49
1 file changed, 49 insertions, 0 deletions
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index ca2af0fa1c36..bb57491f3fb3 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -766,6 +766,44 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
766 flags); 766 flags);
767} 767}
768 768
/*
 * ioat3_prep_pqxor - emulate an xor operation with a pq descriptor
 *
 * Used when the platform advertises pq capability but not xor: the pq
 * engine computes P across the sources, which with all-zero Galois-field
 * coefficients and Q disabled reduces to a plain xor into @dst.
 *
 * @chan:    dma channel for the operation
 * @dst:     destination address for the xor result (becomes the P address)
 * @src:     array of @src_cnt source addresses
 * @src_cnt: number of sources
 * @len:     transfer length in bytes
 * @flags:   dma descriptor flags; DMA_PREP_PQ_DISABLE_Q is or'ed in below
 *
 * Returns the prepared descriptor from __ioat3_prep_pq_lock, or NULL on
 * failure (NOTE(review): presumed — the helper's error convention is not
 * visible here; confirm against __ioat3_prep_pq_lock).
 */
769static struct dma_async_tx_descriptor *
770ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
771 unsigned int src_cnt, size_t len, unsigned long flags)
772{
	/* per-source GF multiplier coefficients; all zero => pure xor into P */
773 unsigned char scf[src_cnt];
774 dma_addr_t pq[2];
775
776 memset(scf, 0, src_cnt);
	/* suppress the Q computation entirely; only P (the xor) is produced */
777 flags |= DMA_PREP_PQ_DISABLE_Q;
778 pq[0] = dst;
	/* Q is disabled above; ~0 looks like a placeholder/dummy address —
	 * TODO confirm against __ioat3_prep_pq_lock's handling of pq[1]
	 */
779 pq[1] = ~0;
780
781 return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
782 flags);
783}
784
/*
 * ioat3_prep_pqxor_val - emulate an xor-validate operation with a pq
 * descriptor
 *
 * Counterpart of ioat3_prep_pqxor for platforms with pq but no xor
 * capability.  The first source doubles as the expected P value: the
 * remaining src_cnt - 1 sources are xor'ed (zero coefficients, Q
 * disabled) and checked against it.
 *
 * @chan:    dma channel for the operation
 * @src:     array of @src_cnt addresses; src[0] holds the expected result
 * @src_cnt: total number of addresses in @src (sources used: src_cnt - 1)
 * @len:     transfer length in bytes
 * @result:  out-param; cleared here, set by the cleanup path on mismatch
 * @flags:   dma descriptor flags; DMA_PREP_PQ_DISABLE_Q is or'ed in below
 */
785struct dma_async_tx_descriptor *
786ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
787 unsigned int src_cnt, size_t len,
788 enum sum_check_flags *result, unsigned long flags)
789{
	/* all-zero GF coefficients => the P check is a plain xor check */
790 unsigned char scf[src_cnt];
791 dma_addr_t pq[2];
792
793 /* the cleanup routine only sets bits on validate failure, it
794 * does not clear bits on validate success... so clear it here
795 */
796 *result = 0;
797
798 memset(scf, 0, src_cnt);
799 flags |= DMA_PREP_PQ_DISABLE_Q;
	/* src[0] is consumed as the P address to validate against ... */
800 pq[0] = src[0];
	/* ... Q is disabled; ~0 presumably a dummy address — TODO confirm */
801 pq[1] = ~0;
802
	/* only the trailing src_cnt - 1 entries are actual xor sources */
803 return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
804 len, flags);
805}
806
769static void __devinit ioat3_dma_test_callback(void *dma_async_param) 807static void __devinit ioat3_dma_test_callback(void *dma_async_param)
770{ 808{
771 struct completion *cmp = dma_async_param; 809 struct completion *cmp = dma_async_param;
@@ -1084,6 +1122,17 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1084 1122
1085 dma_cap_set(DMA_PQ_VAL, dma->cap_mask); 1123 dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
1086 dma->device_prep_dma_pq_val = ioat3_prep_pq_val; 1124 dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
1125
1126 if (!(cap & IOAT_CAP_XOR)) {
1127 dma->max_xor = 8;
1128 dma->xor_align = 2;
1129
1130 dma_cap_set(DMA_XOR, dma->cap_mask);
1131 dma->device_prep_dma_xor = ioat3_prep_pqxor;
1132
1133 dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1134 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
1135 }
1087 } 1136 }
1088 1137
1089 /* -= IOAT ver.3 workarounds =- */ 1138 /* -= IOAT ver.3 workarounds =- */