path: root/crypto/async_tx/async_pq.c
author     NeilBrown <neilb@suse.de>  2009-10-16 01:40:25 -0400
committer  NeilBrown <neilb@suse.de>  2009-10-16 01:40:25 -0400
commit     5dd33c9a4c29015f6d87568d33521c98931a387e (patch)
tree       a79ac38e1957580743b708410b54694f2b1cbf77 /crypto/async_tx/async_pq.c
parent     5e5e3e78ed9038b8f7112835d07084eefb9daa47 (diff)
md/async: don't pass a memory pointer as a page pointer.
md/raid6 passes a list of 'struct page *' to the async_tx routines, which
then either DMA map them for offload, or take the page_address for CPU
based calculations.

For RAID6 we sometimes leave 'blanks' in the list of pages. For CPU based
calcs, we want to treat these as a page of zeros. For offloaded
calculations, we simply don't pass a page to the hardware.

Currently the 'blanks' are encoded as a pointer to raid6_empty_zero_page.
This is a 4096 byte memory region, not a 'struct page'. This is mostly
handled correctly but is rather ugly.

So change the code to pass and expect a NULL pointer for the blanks. When
taking page_address of a page, we need to check for a NULL and in that
case use raid6_empty_zero_page.

Signed-off-by: NeilBrown <neilb@suse.de>
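To illustrate the convention this patch settles on, here is a minimal user-space
sketch. It is an assumption-laden illustration, not kernel code: zero_page,
resolve_block() and BLOCK_SIZE are invented stand-ins for raid6_empty_zero_page
and the page_address() handling described above.

/*
 * Hedged sketch of the NULL-blank convention: a NULL slot in blocks[]
 * means "treat this source as a page of zeros".  The CPU path resolves
 * it to a shared zero buffer; an offload path would skip it entirely.
 * zero_page, resolve_block() and BLOCK_SIZE are illustrative assumptions.
 */
#include <stddef.h>
#include <stdio.h>

#define BLOCK_SIZE 4096

static const unsigned char zero_page[BLOCK_SIZE];	/* stand-in for raid6_empty_zero_page */

/* Return the data to read for one source: the real block, or zeros for a blank. */
static const unsigned char *resolve_block(const unsigned char *block)
{
	return block ? block : zero_page;
}

int main(void)
{
	unsigned char data0[BLOCK_SIZE] = { 0xaa };
	/* blocks[1] is a 'blank'; with this patch it is encoded as NULL */
	const unsigned char *blocks[2] = { data0, NULL };

	for (size_t i = 0; i < 2; i++) {
		const unsigned char *src = resolve_block(blocks[i]);
		printf("source %zu, first byte: 0x%02x\n", i, src[0]);
	}
	return 0;
}

The diff below applies this rule in both directions: the synchronous path
substitutes raid6_empty_zero_page for a NULL entry, while the DMA paths simply
skip NULL slots instead of mapping them.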
Diffstat (limited to 'crypto/async_tx/async_pq.c')
-rw-r--r--  crypto/async_tx/async_pq.c | 15 ++++-----------
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index b88db6d1dc65..9ab1ce4af3cc 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -30,11 +30,6 @@
  */
 static struct page *scribble;
 
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
-
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
  * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
@@ -83,7 +78,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +155,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -290,12 +285,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
 		for (i = 0; i < disks; i++)
-			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
+			if (likely(blocks[i]))
 				dma_src[i] = dma_map_page(dev, blocks[i],
 							  offset, len,
 							  DMA_TO_DEVICE);
-			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,