summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorKyle Spiers <ksspiers@google.com>2018-06-01 16:20:16 -0400
committerVinod Koul <vkoul@kernel.org>2018-06-18 10:47:38 -0400
commit89a7e2f752859c1a1696adf7b00b6ca9a5da2cda (patch)
tree20efdb47f75740523ed334e1f3e816c7b07dc039
parentce397d215ccd07b8ae3f71db689aedb85d56ab40 (diff)
async_pq: Remove VLA usage
In the quest to remove VLAs from the kernel[1], this adjusts the allocation of coefs and blocks to use the existing maximum values (with one new define, MAX_DISKS for coefs, and a reuse of the existing NDISKS for blocks). [1] https://lkml.org/lkml/2018/3/7/621 Signed-off-by: Kyle Spiers <ksspiers@google.com> Reviewed-by: Kees Cook <keescook@chromium.org> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Vinod Koul <vkoul@kernel.org>
-rw-r--r--crypto/async_tx/async_pq.c10
-rw-r--r--crypto/async_tx/raid6test.c4
2 files changed, 9 insertions, 5 deletions
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..80dc567801ec 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -42,6 +42,8 @@ static struct page *pq_scribble_page;
42#define P(b, d) (b[d-2]) 42#define P(b, d) (b[d-2])
43#define Q(b, d) (b[d-1]) 43#define Q(b, d) (b[d-1])
44 44
45#define MAX_DISKS 255
46
45/** 47/**
46 * do_async_gen_syndrome - asynchronously calculate P and/or Q 48 * do_async_gen_syndrome - asynchronously calculate P and/or Q
47 */ 49 */
@@ -184,7 +186,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
184 struct dma_device *device = chan ? chan->device : NULL; 186 struct dma_device *device = chan ? chan->device : NULL;
185 struct dmaengine_unmap_data *unmap = NULL; 187 struct dmaengine_unmap_data *unmap = NULL;
186 188
187 BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); 189 BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
188 190
189 if (device) 191 if (device)
190 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); 192 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
@@ -196,7 +198,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
196 is_dma_pq_aligned(device, offset, 0, len)) { 198 is_dma_pq_aligned(device, offset, 0, len)) {
197 struct dma_async_tx_descriptor *tx; 199 struct dma_async_tx_descriptor *tx;
198 enum dma_ctrl_flags dma_flags = 0; 200 enum dma_ctrl_flags dma_flags = 0;
199 unsigned char coefs[src_cnt]; 201 unsigned char coefs[MAX_DISKS];
200 int i, j; 202 int i, j;
201 203
202 /* run the p+q asynchronously */ 204 /* run the p+q asynchronously */
@@ -299,11 +301,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
299 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); 301 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
300 struct dma_device *device = chan ? chan->device : NULL; 302 struct dma_device *device = chan ? chan->device : NULL;
301 struct dma_async_tx_descriptor *tx; 303 struct dma_async_tx_descriptor *tx;
302 unsigned char coefs[disks-2]; 304 unsigned char coefs[MAX_DISKS];
303 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; 305 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
304 struct dmaengine_unmap_data *unmap = NULL; 306 struct dmaengine_unmap_data *unmap = NULL;
305 307
306 BUG_ON(disks < 4); 308 BUG_ON(disks < 4 || disks > MAX_DISKS);
307 309
308 if (device) 310 if (device)
309 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); 311 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..a5edaabae12a 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,13 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
81 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); 81 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
82 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); 82 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
83 } else { 83 } else {
84 struct page *blocks[disks]; 84 struct page *blocks[NDISKS];
85 struct page *dest; 85 struct page *dest;
86 int count = 0; 86 int count = 0;
87 int i; 87 int i;
88 88
89 BUG_ON(disks > NDISKS);
90
89 /* data+Q failure. Reconstruct data from P, 91 /* data+Q failure. Reconstruct data from P,
90 * then rebuild syndrome 92 * then rebuild syndrome
91 */ 93 */