Diffstat (limited to 'drivers/dma/ioat/dma_v3.c')
 -rw-r--r--  drivers/dma/ioat/dma_v3.c  44
 1 file changed, 37 insertions(+), 7 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..42f6f10fb0cc 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
 			u32 chanerr;
 
 			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+				__func__, chanerr);
 			BUG_ON(is_ioat_bug(chanerr));
 		}
 
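
The point of this hunk is ordering: the channel-error bits are captured in the log before BUG_ON() takes the machine down, so the crash report says why the channel halted. A minimal userspace sketch of the same log-then-assert pattern; read_chanerr() and the CHANERR_* bits below are illustrative stand-ins, not the driver's definitions:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define CHANERR_SRC_ADDR_ERR  0x1	/* illustrative bit definitions */
	#define CHANERR_DEST_ADDR_ERR 0x2

	static uint32_t read_chanerr(void)
	{
		return 0;	/* stand-in for a readl() of a hardware error register */
	}

	int main(void)
	{
		uint32_t chanerr = read_chanerr();

		/* log first, assert second: if the assert fires, the reason
		 * is already on the console */
		fprintf(stderr, "%s: channel halted (%x)\n", __func__, chanerr);
		assert(!(chanerr & (CHANERR_SRC_ADDR_ERR | CHANERR_DEST_ADDR_ERR)));
		return 0;
	}
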
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
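
Both __ioat3_prep_xor_lock and __ioat3_prep_pq_lock append a separate NULL completion descriptor (compl_desc) after the operation descriptor(s), and the interrupt/completion writeback is tied to that last descriptor; returning &desc->txd handed callers a transaction that could appear complete before the whole chain retired. A hedged consumer-side sketch of why the returned descriptor matters, written against the dmaengine API of this era (submit_xor is a hypothetical helper, not driver code):

	#include <linux/dmaengine.h>

	static dma_cookie_t submit_xor(struct dma_chan *chan, dma_addr_t dest,
				       dma_addr_t *srcs, int src_cnt, size_t len)
	{
		struct dma_device *dev = chan->device;
		struct dma_async_tx_descriptor *tx;

		tx = dev->device_prep_dma_xor(chan, dest, srcs, src_cnt, len,
					      DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		/* the cookie and any completion callback belong to the
		 * descriptor returned by prep; for a multi-descriptor chain
		 * that must be the last one, or completion fires early */
		return tx->tx_submit(tx);
	}
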
@@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
 	/* handle the single source multiply case from the raid6
 	 * recovery path
 	 */
-	if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
 		dma_addr_t single_source[2];
 		unsigned char single_source_coef[2];
 
@@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
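
The hardware still parses both result addresses even when one write is suppressed by DMA_PREP_PQ_DISABLE_P/Q, so the disabled slot must at least hold a translatable address; aliasing it to the enabled result costs nothing because the disabled write never lands. Caller-side view as a sketch (prep_q_only is hypothetical; the prep hook and the flag are the real dmaengine API):

	#include <linux/dmaengine.h>

	static struct dma_async_tx_descriptor *
	prep_q_only(struct dma_chan *chan, dma_addr_t q_dest, dma_addr_t *srcs,
		    int src_cnt, unsigned char *coefs, size_t len)
	{
		dma_addr_t pq[2];

		pq[0] = 0;	/* P slot unused: the driver now aliases it to pq[1] */
		pq[1] = q_dest;

		return chan->device->device_prep_dma_pq(chan, pq, srcs, src_cnt,
							coefs, len,
							DMA_PREP_PQ_DISABLE_P);
	}
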
@@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 	dma_addr_t pq[2];
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = dst;
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
 				    flags);
@@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 	*result = 0;
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = src[0];
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
 				    len, flags);
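
The pqxor helpers implement xor on the pq engine: the P result of a pq operation is the plain XOR of the sources, while Q is the GF(2^8) weighted sum, so disabling Q and zeroing the coefficient array yields a pure xor (or xor-validate). The old pq[1] = ~0 placeholder was an address the hardware could still attempt to translate; the already-valid P address is safe because the Q write never happens. A standalone demonstration of the P/Q split, with a toy gf_mul over the raid6 polynomial 0x11d:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t gf_mul(uint8_t a, uint8_t b)
	{
		uint8_t p = 0;

		while (b) {
			if (b & 1)
				p ^= a;
			/* multiply a by x modulo the raid6 polynomial 0x11d */
			a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
			b >>= 1;
		}
		return p;
	}

	int main(void)
	{
		uint8_t src[3] = { 0xde, 0xad, 0xbf };
		uint8_t coef[3] = { 1, 2, 4 };	/* g^0, g^1, g^2 */
		uint8_t p = 0, q = 0;
		int i;

		for (i = 0; i < 3; i++) {
			p ^= src[i];			/* coefficients unused */
			q ^= gf_mul(coef[i], src[i]);	/* coefficients essential */
		}
		printf("P=%02x Q=%02x\n", p, q);
		return 0;
	}
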
@@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioat_chan_common *chan;
@@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
 	if (cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
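
Masking cap before the feature checks is the whole mechanism: everything downstream (the dma_cap_set() calls, the prep hooks, is_raid_device) keys off these bits, so clearing IOAT_CAP_XOR/IOAT_CAP_PQ when DCA is active makes probe behave as if the silicon never advertised raid offload. The same gating restated as a helper, purely for illustration; it assumes the driver's local headers declare what is used here:

	#include "dma.h"	/* assumed home of system_has_dca_enabled() */
	#include "registers.h"	/* IOAT_CAP_* capability bits */

	static u32 ioat3_effective_cap(struct pci_dev *pdev, u32 cap)
	{
		/* dca is incompatible with raid operations */
		if (system_has_dca_enabled(pdev))
			cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ);
		return cap;
	}
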
@@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		device->timer_fn = ioat2_timer_event;
 	}
 
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+	dma->device_prep_dma_pq_val = NULL;
+#endif
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+	dma->device_prep_dma_xor_val = NULL;
+#endif
+
 	/* -= IOAT ver.3 workarounds =- */
 	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
 	 * that can cause stability issues for IOAT ver.3
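
The Kconfig-gated block is a complete opt-out rather than a hint: dmaengine consumers test the capability mask before touching a prep hook, so clearing DMA_PQ_VAL/DMA_XOR_VAL routes validation to the synchronous software path with no further driver changes. Consumer-side check, sketched (can_offload_pq_val is a hypothetical helper; dma_has_cap() is the real test):

	#include <linux/dmaengine.h>

	/* callers fall back to CPU validation when this returns false */
	static bool can_offload_pq_val(struct dma_chan *chan)
	{
		return chan && dma_has_cap(DMA_PQ_VAL, chan->device->cap_mask);
	}
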
