Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--   drivers/dma/ioat/dca.c        |  6
-rw-r--r--   drivers/dma/ioat/dma.h        |  4
-rw-r--r--   drivers/dma/ioat/dma_v2.c     |  2
-rw-r--r--   drivers/dma/ioat/dma_v3.c     | 44
-rw-r--r--   drivers/dma/ioat/hw.h         |  2
-rw-r--r--   drivers/dma/ioat/registers.h  |  4
6 files changed, 46 insertions, 16 deletions
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 69d02615c4d6..abd9038e06b1 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
 	cpuid_level_9 = cpuid_eax(9);
 	res = test_bit(0, &cpuid_level_9);
 	if (!res)
-		dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
 
 	return res;
 }
 
-static int system_has_dca_enabled(struct pci_dev *pdev)
+int system_has_dca_enabled(struct pci_dev *pdev)
 {
 	if (boot_cpu_has(X86_FEATURE_DCA))
 		return dca_enabled_in_bios(pdev);
 
-	dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
 	return 0;
 }
 
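
For reference, system_has_dca_enabled() as it reads on the new side of this hunk, reassembled from the lines shown above (the added comment is editorial, not part of the patch):

int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	/* now dev_dbg rather than dev_err, and no longer static */
	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}
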
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c14fdfeb7f33..45edde996480 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -297,9 +297,7 @@ static inline bool is_ioat_suspended(unsigned long status)
 /* channel was fatally programmed */
 static inline bool is_ioat_bug(unsigned long err)
 {
-	return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
-			 IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
-			 IOAT_CHANERR_LENGTH_ERR));
+	return !!err;
 }
 
 static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
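
For reference, is_ioat_bug() on the new side of the hunk; any non-zero CHANERR value is now treated as a fatal programming error, rather than only the address/control/length error bits enumerated on the old side:

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}
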
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 96ffab7d37a7..8f1f7f05deaa 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -279,6 +279,8 @@ void ioat2_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
 		BUG_ON(is_ioat_bug(chanerr));
 	}
 
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..42f6f10fb0cc 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
 		BUG_ON(is_ioat_bug(chanerr));
 	}
 
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
 	/* handle the single source multiply case from the raid6
 	 * recovery path
 	 */
-	if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
 		dma_addr_t single_source[2];
 		unsigned char single_source_coef[2];
 
@@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
@@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 	dma_addr_t pq[2];
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = dst;
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
 				    flags);
@@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 	*result = 0;
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = src[0];
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
 				    len, flags);
@@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioat_chan_common *chan;
@@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
 	if (cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
@@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		device->timer_fn = ioat2_timer_event;
 	}
 
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+	dma->device_prep_dma_pq_val = NULL;
+#endif
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+	dma->device_prep_dma_xor_val = NULL;
+#endif
+
 	/* -= IOAT ver.3 workarounds =- */
 	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
 	 * that can cause stability issues for IOAT ver.3
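
Taken together, the pq-disable hunks leave ioat3_prep_pqxor() looking roughly like the sketch below. Only the body lines appear in the @@ -778,9 +792,9 @@ hunk; the trailing parameters and the scf declaration are not visible there and are assumptions in this sketch:

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];	/* assumed declaration */
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}
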
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 99afb12bd409..60e675455b6a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,6 +39,8 @@
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
 #define IOAT_VER_3_2            0x32    /* Version 3.2 */
 
+int system_has_dca_enabled(struct pci_dev *pdev);
+
 struct ioat_dma_descriptor {
 	uint32_t	size;
 	union {
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 63038e18ab03..f015ec196700 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -92,9 +92,7 @@
 #define IOAT_CHANCTRL_ERR_COMPLETION_EN		0x0004
 #define IOAT_CHANCTRL_INT_REARM			0x0001
 #define IOAT_CHANCTRL_RUN	(IOAT_CHANCTRL_INT_REARM |\
-				 IOAT_CHANCTRL_ERR_COMPLETION_EN |\
-				 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
-				 IOAT_CHANCTRL_ERR_INT_EN)
+				 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
 
 #define IOAT_DMA_COMP_OFFSET		0x02	/* 16-bit DMA channel compatibility */
 #define IOAT_DMA_COMP_V1		0x0001	/* Compatibility with DMA version 1 */
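
After this hunk the channel run mask reduces to interrupt re-arm plus abort-on-any-error; the error-completion and error-interrupt enables drop out, matching the new-side lines above:

#define IOAT_CHANCTRL_RUN	(IOAT_CHANCTRL_INT_REARM |\
				 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)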