aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ioat
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2013-04-10 19:44:20 -0400
committerVinod Koul <vinod.koul@intel.com>2013-04-15 13:16:14 -0400
commite0884772d323b745c65baa65df391b1c70829410 (patch)
treeaa760881884a0d80672a1c2646600ad9130d2cb7 /drivers/dma/ioat
parent42c91ee71d6dfa074b4c79abb95eb095430f83af (diff)
ioatdma: Removing hw bug workaround for CB3.x.2 and earlier
CB3.2 and earlier hardware has silicon bugs whose workarounds are no longer needed with the new hardware. We don't have to use a NULL op to signal interrupt for RAID ops any longer. This code makes sure the legacy workarounds only happen on legacy hardware. Signed-off-by: Dave Jiang <dave.jiang@intel.com> Acked-by: Dan Williams <djbw@fb.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--drivers/dma/ioat/dma_v3.c31
1 file changed, 20 insertions, 11 deletions
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index cf97e3f16924..639311598f35 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -837,6 +837,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
837{ 837{
838 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 838 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
839 struct ioat_chan_common *chan = &ioat->base; 839 struct ioat_chan_common *chan = &ioat->base;
840 struct ioatdma_device *device = chan->device;
840 struct ioat_ring_ent *compl_desc; 841 struct ioat_ring_ent *compl_desc;
841 struct ioat_ring_ent *desc; 842 struct ioat_ring_ent *desc;
842 struct ioat_ring_ent *ext; 843 struct ioat_ring_ent *ext;
@@ -847,6 +848,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
847 u32 offset = 0; 848 u32 offset = 0;
848 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; 849 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
849 int i, s, idx, with_ext, num_descs; 850 int i, s, idx, with_ext, num_descs;
851 int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
850 852
851 dev_dbg(to_dev(chan), "%s\n", __func__); 853 dev_dbg(to_dev(chan), "%s\n", __func__);
852 /* the engine requires at least two sources (we provide 854 /* the engine requires at least two sources (we provide
@@ -872,7 +874,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
872 * order. 874 * order.
873 */ 875 */
874 if (likely(num_descs) && 876 if (likely(num_descs) &&
875 ioat2_check_space_lock(ioat, num_descs+1) == 0) 877 ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
876 idx = ioat->head; 878 idx = ioat->head;
877 else 879 else
878 return NULL; 880 return NULL;
@@ -926,16 +928,23 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
926 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 928 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
927 dump_pq_desc_dbg(ioat, desc, ext); 929 dump_pq_desc_dbg(ioat, desc, ext);
928 930
929 /* completion descriptor carries interrupt bit */ 931 if (!cb32) {
930 compl_desc = ioat2_get_ring_ent(ioat, idx + i); 932 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
931 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; 933 pq->ctl_f.compl_write = 1;
932 hw = compl_desc->hw; 934 compl_desc = desc;
933 hw->ctl = 0; 935 } else {
934 hw->ctl_f.null = 1; 936 /* completion descriptor carries interrupt bit */
935 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 937 compl_desc = ioat2_get_ring_ent(ioat, idx + i);
936 hw->ctl_f.compl_write = 1; 938 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
937 hw->size = NULL_DESC_BUFFER_SIZE; 939 hw = compl_desc->hw;
938 dump_desc_dbg(ioat, compl_desc); 940 hw->ctl = 0;
941 hw->ctl_f.null = 1;
942 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
943 hw->ctl_f.compl_write = 1;
944 hw->size = NULL_DESC_BUFFER_SIZE;
945 dump_desc_dbg(ioat, compl_desc);
946 }
947
939 948
940 /* we leave the channel locked to ensure in order submission */ 949 /* we leave the channel locked to ensure in order submission */
941 return &compl_desc->txd; 950 return &compl_desc->txd;