 drivers/dma/ioat/dma_v3.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 3686dddf6bff..35d1e33afd5b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -448,7 +448,8 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
 		/* pass */;
 	else
 		return NULL;
-	for (i = 0; i < num_descs; i++) {
+	i = 0;
+	do {
 		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
 
 		desc = ioat2_get_ring_ent(ioat, idx + i);
@@ -463,7 +464,7 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
 		len -= xfer_size;
 		dest += xfer_size;
 		dump_desc_dbg(ioat, desc);
-	}
+	} while (++i < num_descs);
 
 	desc->txd.flags = flags;
 	desc->len = total_len;
@@ -518,7 +519,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 		/* pass */;
 	else
 		return NULL;
-	for (i = 0; i < num_descs; i += 1 + with_ext) {
+	i = 0;
+	do {
 		struct ioat_raw_descriptor *descs[2];
 		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
 		int s;
@@ -546,7 +548,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 		len -= xfer_size;
 		offset += xfer_size;
 		dump_desc_dbg(ioat, desc);
-	}
+	} while ((i += 1 + with_ext) < num_descs);
 
 	/* last xor descriptor carries the unmap parameters and fence bit */
 	desc->txd.flags = flags;
@@ -664,7 +666,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 		/* pass */;
 	else
 		return NULL;
-	for (i = 0; i < num_descs; i += 1 + with_ext) {
+	i = 0;
+	do {
 		struct ioat_raw_descriptor *descs[2];
 		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
 
@@ -703,7 +706,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 
 		len -= xfer_size;
 		offset += xfer_size;
-	}
+	} while ((i += 1 + with_ext) < num_descs);
 
 	/* last pq descriptor carries the unmap parameters and fence bit */
 	desc->txd.flags = flags;
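
Note (commentary, not part of the patch): each hunk replaces a counted for-loop with a do/while whose condition is tested at the end, so the body provably executes at least once. Presumably the check just above each loop (the "/* pass */; else return NULL;" branch) already guarantees num_descs is non-zero, and each prep routine dereferences desc after the loop (desc->txd.flags = flags), so the do/while form makes it visible to the compiler that desc is assigned before that use. The sketch below is a minimal, self-contained illustration of the same conversion; struct fake_desc, get_ring_ent() and fill_descs() are hypothetical stand-ins, not driver code.

	#include <stdio.h>
	#include <stddef.h>

	struct fake_desc {
		size_t len;
		int flags;
	};

	static struct fake_desc ring[8];

	/* Hypothetical helper standing in for ioat2_get_ring_ent(). */
	static struct fake_desc *get_ring_ent(size_t idx)
	{
		return &ring[idx % 8];
	}

	/*
	 * Callers only reach this with num_descs >= 1, so the do/while form
	 * is safe and lets the compiler see that "desc" is assigned before
	 * the dereference after the loop.  With the old form,
	 *   for (i = 0; i < num_descs; i++) { ... }
	 * a zero num_descs would leave desc uninitialized at that point.
	 */
	static void fill_descs(size_t num_descs, int flags)
	{
		struct fake_desc *desc;
		size_t i = 0;

		do {
			desc = get_ring_ent(i);
			desc->len = 64;	/* per-descriptor transfer size */
		} while (++i < num_descs);

		/* Guaranteed-initialized use after the loop. */
		desc->flags = flags;
	}

	int main(void)
	{
		fill_descs(3, 1);
		printf("last desc flags: %d\n", ring[2].flags);
		return 0;
	}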
