author     Linus Torvalds <torvalds@linux-foundation.org>  2009-11-30 16:53:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-11-30 16:53:53 -0500
commit     f50733450362182fd16d658751615635850a8bff (patch)
tree       f6eb22b1e51b2b29f4f528dc7d05dd6f07f3788b
parent     50b767d0baee51be5b11703cdb2a5202f5b67582 (diff)
parent     56adf7e8127d601b172e180b44551ce83404348f (diff)
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
shdma: fix initialization error handling
ioat3: fix pq completion versus channel deallocation race
async_tx: build-time toggling of async_{syndrome,xor}_val dma support
dmaengine: include xor/pq validate in device_has_all_tx_types()
ioat2,3: report all uncorrectable errors
ioat3: specify valid address for disabled-Q or disabled-P
ioat2,3: disable asynchronous error notifications
ioat3: dca and raid operations are incompatible
ioat: silence "dca disabled" messages
-rw-r--r--  crypto/async_tx/Kconfig      |  5
-rw-r--r--  crypto/async_tx/async_pq.c   | 14
-rw-r--r--  crypto/async_tx/async_xor.c  | 15
-rw-r--r--  drivers/dma/Kconfig          |  2
-rw-r--r--  drivers/dma/dmaengine.c      | 10
-rw-r--r--  drivers/dma/ioat/dca.c       |  6
-rw-r--r--  drivers/dma/ioat/dma.h       |  4
-rw-r--r--  drivers/dma/ioat/dma_v2.c    |  2
-rw-r--r--  drivers/dma/ioat/dma_v3.c    | 44
-rw-r--r--  drivers/dma/ioat/hw.h        |  2
-rw-r--r--  drivers/dma/ioat/registers.h |  4
-rw-r--r--  drivers/dma/shdma.c          | 12
12 files changed, 91 insertions, 29 deletions
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e5aeb2b79e6f..e28e276ac611 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_CORE
 	select ASYNC_PQ
 
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+	bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+	bool
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 6b5cc4fba59f..ec87f53d5059 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 }
 EXPORT_SYMBOL_GPL(async_gen_syndrome);
 
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+				     disks, len);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
 		   struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
-						      NULL, 0, blocks, disks,
-						      len);
+	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
 	unsigned char coefs[disks-2];
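The new pq_val_chan() helper turns the Kconfig symbol into a compile-time short-circuit: with CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA set, the channel lookup collapses to NULL, and a NULL channel steers async_syndrome_val() onto its synchronous software path. xor_val_chan() in async_xor.c below follows the same shape. A minimal standalone model of the pattern, with illustrative names only (compile with -DDISABLE_PQ_VAL_DMA to force the fallback):

#include <stdio.h>
#include <stddef.h>

struct dma_chan { const char *name; };

static struct dma_chan pq_val_capable = { "hw-pq-val" };

/* mirrors pq_val_chan(): the ifdef wins before the lookup runs */
static struct dma_chan *find_pq_val_chan(void)
{
#ifdef DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return &pq_val_capable;
}

int main(void)
{
	struct dma_chan *chan = find_pq_val_chan();

	if (chan)
		printf("offload validate on %s\n", chan->name);
	else
		printf("synchronous fallback\n");
	return 0;
}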
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 79182dcb91b7..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 		memcmp(a, a + 4, len - 4) == 0);
 }
 
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+	     struct page **src_list, int src_cnt, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+				     src_cnt, len);
+}
+
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
@@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	      int src_cnt, size_t len, enum sum_check_flags *result,
 	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
-						      &dest, 1, src_list,
-						      src_cnt, len);
+	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_addr_t *dma_src = NULL;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5903a88351bf..b401dadad4a8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
 	select DMA_ENGINE
 	select DCA
 	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	select ASYNC_TX_DISABLE_PQ_VAL_DMA
+	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
 	  in recent Intel Xeon chipsets.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bd0b248de2cf..8f99354082ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
 	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+		return false;
+	#endif
 	#endif
 
 	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+		return false;
+	#endif
 	#endif
 
 	return true;
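With these hunks, device_has_all_tx_types() also demands the validate capabilities unless the matching CONFIG_ASYNC_TX_DISABLE_*_VAL_DMA symbol records that a driver deliberately opted out. A self-contained sketch of that gating, using stand-in capability bits rather than the kernel's dma_cap API:

#include <stdbool.h>
#include <stdio.h>

enum {
	CAP_XOR     = 1 << 0,
	CAP_XOR_VAL = 1 << 1,
	CAP_PQ      = 1 << 2,
	CAP_PQ_VAL  = 1 << 3,
};

static bool has_all_tx_types(unsigned int caps)
{
	if (!(caps & CAP_XOR))
		return false;
#ifndef DISABLE_XOR_VAL_DMA	/* stand-in for CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA */
	if (!(caps & CAP_XOR_VAL))
		return false;
#endif
	if (!(caps & CAP_PQ))
		return false;
#ifndef DISABLE_PQ_VAL_DMA	/* stand-in for CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA */
	if (!(caps & CAP_PQ_VAL))
		return false;
#endif
	return true;
}

int main(void)
{
	/* an ioat3-like device with validate ops masked off passes only
	 * when both DISABLE_* stand-ins are defined at build time */
	printf("%s\n", has_all_tx_types(CAP_XOR | CAP_PQ) ? "ok" : "missing caps");
	return 0;
}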
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 69d02615c4d6..abd9038e06b1 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
 	cpuid_level_9 = cpuid_eax(9);
 	res = test_bit(0, &cpuid_level_9);
 	if (!res)
-		dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
 
 	return res;
 }
 
-static int system_has_dca_enabled(struct pci_dev *pdev)
+int system_has_dca_enabled(struct pci_dev *pdev)
 {
 	if (boot_cpu_has(X86_FEATURE_DCA))
 		return dca_enabled_in_bios(pdev);
 
-	dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
 	return 0;
 }
 
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c14fdfeb7f33..45edde996480 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -297,9 +297,7 @@ static inline bool is_ioat_suspended(unsigned long status)
 /* channel was fatally programmed */
 static inline bool is_ioat_bug(unsigned long err)
 {
-	return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
-			 IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
-			 IOAT_CHANERR_LENGTH_ERR));
+	return !!err;
 }
 
 static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 96ffab7d37a7..8f1f7f05deaa 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -279,6 +279,8 @@ void ioat2_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
 		BUG_ON(is_ioat_bug(chanerr));
 	}
 
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..42f6f10fb0cc 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
 		BUG_ON(is_ioat_bug(chanerr));
 	}
 
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
 	/* handle the single source multiply case from the raid6
 	 * recovery path
 	 */
-	if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
 		dma_addr_t single_source[2];
 		unsigned char single_source_coef[2];
 
@@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
@@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 	dma_addr_t pq[2];
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = dst;
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
 				    flags);
@@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 	*result = 0;
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = src[0];
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
 				    len, flags);
@@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioat_chan_common *chan;
@@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
 	if (cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
@@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		device->timer_fn = ioat2_timer_event;
 	}
 
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+	dma->device_prep_dma_pq_val = NULL;
+#endif
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+	dma->device_prep_dma_xor_val = NULL;
+#endif
+
 	/* -= IOAT ver.3 workarounds =- */
 	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
 	 * that can cause stability issues for IOAT ver.3
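The "specify valid address for disabled result" hunks work around the engine still translating both result addresses even when DMA_PREP_PQ_DISABLE_P or DMA_PREP_PQ_DISABLE_Q is set (as the hunk comments suggest), so the old ~0 sentinel could leave an unmappable address in the descriptor; aliasing the disabled slot onto the enabled one keeps every address valid. A standalone sketch of just that fixup, with stand-in flag values:

#include <stdint.h>
#include <stdio.h>

#define DISABLE_P (1u << 0)	/* stand-in for DMA_PREP_PQ_DISABLE_P */
#define DISABLE_Q (1u << 1)	/* stand-in for DMA_PREP_PQ_DISABLE_Q */

/* alias a disabled result slot onto the enabled one so the descriptor
 * never carries an unmappable sentinel such as ~0 */
static void fixup_pq_addrs(uint64_t pq[2], unsigned int flags)
{
	if (flags & DISABLE_P)
		pq[0] = pq[1];
	if (flags & DISABLE_Q)
		pq[1] = pq[0];
}

int main(void)
{
	uint64_t pq[2] = { ~0ull /* old sentinel */, 0x1000 /* valid Q buffer */ };

	fixup_pq_addrs(pq, DISABLE_P);
	printf("P=%#llx Q=%#llx\n",
	       (unsigned long long)pq[0], (unsigned long long)pq[1]);
	return 0;
}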
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 99afb12bd409..60e675455b6a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,6 +39,8 @@
 #define IOAT_VER_3_0	0x30	/* Version 3.0 */
 #define IOAT_VER_3_2	0x32	/* Version 3.2 */
 
+int system_has_dca_enabled(struct pci_dev *pdev);
+
 struct ioat_dma_descriptor {
 	uint32_t	size;
 	union {
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 63038e18ab03..f015ec196700 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -92,9 +92,7 @@
 #define IOAT_CHANCTRL_ERR_COMPLETION_EN	0x0004
 #define IOAT_CHANCTRL_INT_REARM		0x0001
 #define IOAT_CHANCTRL_RUN	(IOAT_CHANCTRL_INT_REARM |\
-				 IOAT_CHANCTRL_ERR_COMPLETION_EN |\
-				 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
-				 IOAT_CHANCTRL_ERR_INT_EN)
+				 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
 
 #define IOAT_DMA_COMP_OFFSET		0x02	/* 16-bit DMA channel compatibility */
 #define IOAT_DMA_COMP_V1		0x0001	/* Compatibility with DMA version 1 */
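Removing IOAT_CHANCTRL_ERR_COMPLETION_EN and IOAT_CHANCTRL_ERR_INT_EN from the run mask is the "disable asynchronous error notifications" change: errors now simply abort the channel, and the halted state is reported by the dev_err added to ioat2/3_timer_event above rather than by an error interrupt. A quick standalone check of which bits the mask loses; the two bit values not shown in this hunk are assumed for illustration:

#include <stdio.h>

#define ERR_INT_EN		0x0010	/* assumed value, not in this hunk */
#define ANY_ERR_ABORT_EN	0x0008	/* assumed value, not in this hunk */
#define ERR_COMPLETION_EN	0x0004	/* from the hunk */
#define INT_REARM		0x0001	/* from the hunk */

#define CHANCTRL_RUN_OLD (INT_REARM | ERR_COMPLETION_EN | \
			  ANY_ERR_ABORT_EN | ERR_INT_EN)
#define CHANCTRL_RUN_NEW (INT_REARM | ANY_ERR_ABORT_EN)

int main(void)
{
	/* notification bits removed from the run mask by this patch */
	printf("dropped bits: %#x\n", CHANCTRL_RUN_OLD & ~CHANCTRL_RUN_NEW);
	return 0;
}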
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b3b065c4e5c1..034ecf0ace03 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -640,17 +640,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 #endif
 	struct sh_dmae_device *shdev;
 
+	/* get platform data */
+	if (!pdev->dev.platform_data)
+		return -ENODEV;
+
 	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
 	if (!shdev) {
 		dev_err(&pdev->dev, "No enough memory\n");
-		err = -ENOMEM;
-		goto shdev_err;
+		return -ENOMEM;
 	}
 
-	/* get platform data */
-	if (!pdev->dev.platform_data)
-		goto shdev_err;
-
 	/* platform data */
 	memcpy(&shdev->pdata, pdev->dev.platform_data,
 	       sizeof(struct sh_dmae_pdata));
@@ -722,7 +721,6 @@ eirq_err:
 rst_err:
 	kfree(shdev);
 
-shdev_err:
 	return err;
 }
 
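The shdma reordering fixes two defects in one move: a missing platform_data used to jump to shdev_err with err never assigned, and that jump also bypassed the kfree(shdev) at rst_err, leaking the allocation. Checking inputs before allocating, and returning directly while nothing needs unwinding, removes both. A condensed standalone model of the corrected shape:

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

struct pdev { const void *platform_data; };

/* validate inputs first; return directly while nothing needs unwinding */
static int probe(struct pdev *pdev)
{
	void *shdev;

	if (!pdev->platform_data)	/* before any allocation: no leak,
					 * no uninitialized error code */
		return -ENODEV;

	shdev = calloc(1, 64);
	if (!shdev)
		return -ENOMEM;

	/* later failures would unwind through kfree()-style labels */
	free(shdev);
	return 0;
}

int main(void)
{
	struct pdev dev = { .platform_data = NULL };

	printf("probe: %d (expect %d)\n", probe(&dev), -ENODEV);
	return 0;
}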
