author    NeilBrown <neilb@suse.com>    2016-01-06 19:02:34 -0500
committer Vinod Koul <vinod.koul@intel.com>    2016-01-07 00:36:18 -0500
commit    b02bab6b0f928d49dbfb03e1e4e9dd43647623d7 (patch)
tree      1d7648d55adc0d47d6da533f7a96a70aa9cf8ec8
parent    16605e8d50898ac88b5b504a7fbd63ecdcf37702 (diff)
async_tx: use GFP_NOWAIT rather than GFP_NOIO
These async_XX functions are called from md/raid5 in an atomic section,
between get_cpu() and put_cpu(), so they must not sleep.  Use GFP_NOWAIT
rather than GFP_NOIO for the unmap-data allocations.

Dan Williams writes: Longer term async_tx needs to be merged into md
directly as we can allocate this unmap data statically per-stripe
rather than per request.

Fixes: 7476bd79fc01 ("async_pq: convert to dmaengine_unmap_data")
Cc: stable@vger.kernel.org (v3.13+)
Reported-and-tested-by: Stanislav Samsonov <slava@annapurnalabs.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
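For context, the constraint described in the message can be sketched as follows. This is an illustrative fragment only, not the md/raid5 source; the function name is hypothetical. The point is the shape: the caller disables preemption with get_cpu(), so nothing on that path may sleep, and a GFP_NOIO allocation inside the async_tx helpers may sleep, whereas GFP_NOWAIT fails immediately instead.

#include <linux/async_tx.h>
#include <linux/smp.h>

/*
 * Illustrative sketch only (hypothetical function name, not md/raid5 code).
 * get_cpu() disables preemption, so nothing on this path may sleep; any
 * allocation performed inside the async_tx helpers therefore has to use
 * GFP_NOWAIT, which returns NULL under pressure instead of blocking.
 */
static void example_stripe_copy(struct page *dest, struct page *src,
				size_t len, struct async_submit_ctl *submit)
{
	int cpu = get_cpu();	/* atomic section begins: no sleeping */

	/* internally allocates unmap data; must not use a sleeping GFP flag */
	async_memcpy(dest, src, 0, 0, len, submit);

	put_cpu();		/* atomic section ends */
	(void)cpu;
}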
-rw-r--r--	crypto/async_tx/async_memcpy.c	2
-rw-r--r--	crypto/async_tx/async_pq.c	4
-rw-r--r--	crypto/async_tx/async_raid6_recov.c	4
-rw-r--r--	crypto/async_tx/async_xor.c	4
4 files changed, 7 insertions, 7 deletions
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb75..88bc8e6b2a54 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0c2633..c0748bbd4c08 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a84981495..8fab6275ea1f 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f9..da75777f2b3f 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
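One practical consequence of GFP_NOWAIT, visible in the unchanged "if (unmap ...)" tests in every hunk above, is that these helpers already tolerate the allocation failing: when dmaengine_get_unmap_data() returns NULL they simply take the synchronous software branch instead of the DMA offload. A minimal sketch of that shape, with a hypothetical function name and not taken from the files above:

#include <linux/dmaengine.h>

/*
 * Sketch of the fallback pattern shared by the helpers patched above
 * (hypothetical function name). GFP_NOWAIT never sleeps; if the unmap
 * data cannot be allocated, unmap stays NULL and the software path runs.
 */
static void sketch_async_helper(struct dma_device *device)
{
	struct dmaengine_unmap_data *unmap = NULL;

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	if (unmap) {
		/* DMA offload path: map pages, build and submit a descriptor */
		dmaengine_unmap_put(unmap);
	} else {
		/* allocation failed or no channel: do the work on the CPU */
	}
}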