author	Dan Williams <dan.j.williams@intel.com>	2008-07-17 20:59:55 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2008-07-17 20:59:55 -0400
commit	669ab0b210f9bd15d94d4d6a49ae13366a85e4da (patch)
tree	ead561418e94def95c4d4b4512ae3c9a7888febe /crypto
parent	1e55db2d6bdef92abc981b68673564e63c80da4d (diff)
async_tx: fix handling of the "out of descriptor" condition in async_xor
Ensure forward progress is made when a dmaengine driver is unable to
allocate an xor descriptor by breaking the dependency chain with
async_tx_quiesce() and issuing any pending descriptors.
Tested with iop-adma by setting device->max_xor = 2 to force multiple
calls to device_prep_dma_xor for each call to async_xor and limiting the
descriptor slot pool to 5. Discovered that the minimum descriptor pool
size for iop-adma is 2 * iop_chan_xor_slot_cnt(device->max_xor) + 1.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
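
For context, the recovery pattern this patch introduces can be summarized in the sketch below. It is a simplified illustration, not the kernel code itself: the helper name prep_xor_or_retry and its argument list are hypothetical, while device_prep_dma_xor, dma_async_issue_pending and async_tx_quiesce are the dmaengine/async_tx calls that appear in the diff.

#include <linux/dmaengine.h>
#include <linux/async_tx.h>

/*
 * Sketch (assumed, simplified): when descriptor allocation fails,
 * quiesce the dependency chain, flush pending work to the hardware so
 * completed slots can be recycled, then retry the prep call.
 */
static struct dma_async_tx_descriptor *
prep_xor_or_retry(struct dma_device *dma, struct dma_chan *chan,
		  dma_addr_t dest, dma_addr_t *srcs, unsigned int src_cnt,
		  size_t len, unsigned long dma_flags,
		  struct dma_async_tx_descriptor **depend_tx)
{
	struct dma_async_tx_descriptor *tx;

	tx = dma->device_prep_dma_xor(chan, dest, srcs, src_cnt, len,
				      dma_flags);
	if (unlikely(!tx))
		/* break the dependency chain so prior work can retire */
		async_tx_quiesce(depend_tx);

	while (unlikely(!tx)) {
		/* push queued descriptors to hardware to free up slots */
		dma_async_issue_pending(chan);
		tx = dma->device_prep_dma_xor(chan, dest, srcs, src_cnt,
					      len, dma_flags);
	}

	return tx;
}

The design choice is that async_tx_quiesce() waits for the dependency to complete (panicking on DMA_ERROR), acks it and clears the pointer, so the retry loop is then free to spin on dma_async_issue_pending() until the driver can hand back a descriptor.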
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/async_tx/async_xor.c	32
1 file changed, 26 insertions, 6 deletions
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1fcf45ac81ec..19d16e452bcc 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,6 +30,24 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
@@ -85,15 +103,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
 					      xor_src_cnt, len, dma_flags);
 
-		if (unlikely(!tx && depend_tx))
-			dma_wait_for_async_tx(depend_tx);
+		if (unlikely(!tx))
+			async_tx_quiesce(&depend_tx);
 
 		/* spin wait for the preceeding transactions to complete */
-		while (unlikely(!tx))
+		while (unlikely(!tx)) {
+			dma_async_issue_pending(chan);
 			tx = dma->device_prep_dma_xor(chan, dma_dest,
 						      &dma_src[src_off],
 						      xor_src_cnt, len,
 						      dma_flags);
+		}
 
 		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
 				_cb_param);
@@ -267,11 +287,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
 						      len, result,
 						      dma_prep_flags);
-		if (!tx) {
-			if (depend_tx)
-				dma_wait_for_async_tx(depend_tx);
+		if (unlikely(!tx)) {
+			async_tx_quiesce(&depend_tx);
 
 			while (!tx)
+				dma_async_issue_pending(chan);
 				tx = device->device_prep_dma_zero_sum(chan,
 					dma_src, src_cnt, len, result,
 					dma_prep_flags);