author		Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 23:42:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 23:42:45 -0500
commit		e3842cbfe0976b014288147b130551d8bf52b96c (patch)
tree		686501f0eb80076240c5f38b34d1acbb105a190b /drivers/dma/ioat
parent		4d98ead183a2be77bfea425d5243e32629eaaeb1 (diff)
parent		4625d2a513d60ca9c3e8cae42c8f3d9efc1b4211 (diff)
Merge tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"Fairly routine update this time around with all changes specific to
drivers:
- New driver for STMicroelectronics FDMA
- Memory-to-memory transfers on dw dmac
- Support for slave maps on pl08x devices
- Bunch of driver fixes to use dma_pool_zalloc (see the sketch below)
- Bunch of compile and warning fixes spread across drivers"
[ The ST FDMA driver already came in earlier through the remoteproc tree ]
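
Several of the commits below convert drivers from a dma_pool_alloc()/memset() pair to dma_pool_zalloc(). A minimal sketch of that conversion, with hypothetical names (pool, desc, phys); the real call sites live in the individual drivers:

	#include <linux/dmapool.h>
	#include <linux/string.h>

	/* Before: allocate from the DMA pool, then clear the memory by hand. */
	desc = dma_pool_alloc(pool, GFP_NOWAIT, &phys);
	if (desc)
		memset(desc, 0, sizeof(*desc));

	/* After: dma_pool_zalloc() hands back already-zeroed memory, so the
	 * allocation and the memset collapse into a single call. */
	desc = dma_pool_zalloc(pool, GFP_NOWAIT, &phys);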
* tag 'dmaengine-4.10-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
dmaengine: sirf-dma: remove unused ‘sdesc’
dmaengine: pl330: remove unused ‘regs’
dmaengine: s3c24xx: remove unused ‘cdata’
dmaengine: stm32-dma: remove unused ‘src_addr’
dmaengine: stm32-dma: remove unused ‘dst_addr’
dmaengine: stm32-dma: remove unused ‘sfcr’
dmaengine: pch_dma: remove unused ‘cookie’
dmaengine: mic_x100_dma: remove unused ‘data’
dmaengine: img-mdc: remove unused ‘prev_phys’
dmaengine: usb-dmac: remove unused ‘uchan’
dmaengine: ioat: remove unused ‘res’
dmaengine: ioat: remove unused ‘ioat_dma’
dmaengine: ioat: remove unused ‘is_raid_device’
dmaengine: pl330: do not generate unaligned access
dmaengine: k3dma: move to dma_pool_zalloc
dmaengine: at_hdmac: move to dma_pool_zalloc
dmaengine: at_xdmac: don't restore unsaved status
dmaengine: ioat: set error code on failures
dmaengine: ioat: set error code on failures
dmaengine: DW DMAC: add multi-block property to device tree
...
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--	drivers/dma/ioat/dma.c	17
-rw-r--r--	drivers/dma/ioat/init.c	21
2 files changed, 18 insertions(+), 20 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..a371b07a0981 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
 #include "../dmaengine.h"
 
 static char *chanerr_str[] = {
+	"DMA Transfer Source Address Error",
 	"DMA Transfer Destination Address Error",
 	"Next Descriptor Address Error",
 	"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
 	"Result Guard Tag verification Error",
 	"Result Application Tag verification Error",
 	"Result Reference Tag verification Error",
-	NULL
 };
 
 static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
 {
 	int i;
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
 		if ((chanerr >> i) & 1) {
-			if (chanerr_str[i]) {
-				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
-					i, chanerr_str[i]);
-			} else
-				break;
+			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+				i, chanerr_str[i]);
 		}
 	}
 }
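
This hunk is the standard sentinel-to-ARRAY_SIZE conversion: once the loop bound comes from ARRAY_SIZE(), the trailing NULL entry in chanerr_str[] and the runtime NULL check become redundant. A minimal sketch of the idiom, with hypothetical names:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/printk.h>
	#include <linux/types.h>

	static const char * const bit_names[] = {
		"first error", "second error", "third error",
	};

	static void print_set_bits(u32 status)
	{
		int i;

		/* ARRAY_SIZE() is computed at compile time from the array's
		 * declaration, so no sentinel is needed to find the end. */
		for (i = 0; i < ARRAY_SIZE(bit_names); i++)
			if ((status >> i) & 1)
				pr_err("Err(%d): %s\n", i, bit_names[i]);
	}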
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 {
 	struct ioat_dma_descriptor *hw;
 	struct ioat_ring_ent *desc;
-	struct ioatdma_device *ioat_dma;
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
 	int chunk;
 	dma_addr_t phys;
 	u8 *pos;
 	off_t offs;
 
-	ioat_dma = to_ioatdma_device(chan->device);
-
 	chunk = idx / IOAT_DESCS_PER_2M;
 	idx &= (IOAT_DESCS_PER_2M - 1);
 	offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 
 	tx = &desc->txd;
 	if (tx->cookie) {
-		struct dmaengine_result res;
-
 		dma_cookie_complete(tx);
 		dma_descriptor_unmap(tx);
-		res.result = DMA_TRANS_NOERROR;
 		dmaengine_desc_get_callback_invoke(tx, NULL);
 		tx->callback = NULL;
 		tx->callback_result = NULL;
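
The unused 'res' here was filled in but never passed on: the callback helper was already invoked with a NULL result, so deleting the struct changes no behavior. For contrast, a hedged sketch of both calling conventions of dmaengine_desc_get_callback_invoke(), assuming tx is the descriptor being completed:

	/* Reporting an explicit result to the client's callback ... */
	struct dmaengine_result res = {
		.result  = DMA_TRANS_NOERROR,
		.residue = 0,
	};
	dmaengine_desc_get_callback_invoke(tx, &res);

	/* ... versus passing NULL, which runs the callback with no result
	 * information -- what __cleanup() was effectively doing all along. */
	dmaengine_desc_get_callback_invoke(tx, NULL);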
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f7110b96d..90eddd9f07e4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma_src)) {
 		dev_err(dev, "mapping src buffer failed\n");
+		err = -ENOMEM;
 		goto free_resources;
 	}
 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
 	if (dma_mapping_error(dev, dma_dest)) {
 		dev_err(dev, "mapping dest buffer failed\n");
+		err = -ENOMEM;
 		goto unmap_src;
 	}
 	flags = DMA_PREP_INTERRUPT;
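
All of the init.c hunks plug the same hole: a mapping-failure path jumped to the shared cleanup label without setting err, so the self-test could report success for a failed DMA mapping. The pattern in isolation, with hypothetical dev, buf, len, and an 'out' label:

	#include <linux/dma-mapping.h>

	dma_addr_t addr;
	int err = 0;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* Without this assignment, 'goto out' would return the stale
		 * value of err (possibly 0, i.e. success) to the caller. */
		err = -ENOMEM;
		goto out;
	}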
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	op = IOAT_OP_XOR;
 
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
+	if (dma_mapping_error(dev, dest_dma)) {
+		err = -ENOMEM;
 		goto free_resources;
+	}
 
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_srcs[i] = DMA_ERROR_CODE;
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioatdma_chan *ioat_chan;
-	bool is_raid_device = false;
 	int err;
 	u16 val16;
 
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
 
 	if (ioat_dma->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
 		dma->max_xor = 8;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	}
 
 	if (ioat_dma->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;
 
 		dma->device_prep_dma_pq = ioat_prep_pq;
 		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
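
The is_raid_device deletions, like the 'ioat_dma' and 'res' removals in dma.c, clean up dead stores: variables that are written but never read after earlier refactoring. This is the class of diagnostic gcc emits as -Wunused-but-set-variable (re-enabled by W=1 in kernel builds of this era); a minimal, hypothetical reproduction:

	/* gcc -Wunused-but-set-variable warns:
	 *   variable 'is_raid' set but not used */
	static int probe_caps(unsigned int cap)
	{
		int is_raid = 0;

		if (cap & 0x1)
			is_raid = 1;	/* written here ... */

		return 0;		/* ... but its value is never read */
	}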