author    Dan Williams <dan.j.williams@intel.com>    2008-06-27 18:31:50 -0400
committer Neil Brown <neilb@notabene.brown>          2008-06-27 18:31:50 -0400
commit    b203886edbcaac3ca427cf4dbcb50b18bdb346fd (patch)
tree      d96cf939fd3a7ab454f61110b91b3a928eb5fe7c /drivers/md/raid5.c
parent    0cd17fec983b6bca505eecee1af33138687220b6 (diff)
md: kill STRIPE_OP_MOD_DMA in raid5 offload
From: Dan Williams <dan.j.williams@intel.com>

This micro-optimization allowed the raid code to skip a re-read of the
parity block after checking parity.  It took advantage of the fact that
xor-offload-engines have their own internal result buffer and can check
parity without writing to memory.  Remove it for the following reasons:

1/ It is a layering violation for MD to need to manage the DMA and
   non-DMA paths within async_xor_zero_sum
2/ Bad precedent to toggle the 'ops' flags outside the lock
3/ Hard to realize a performance gain as reads will not need an updated
   parity block and writes will dirty it anyways.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Neil Brown <neilb@suse.de>
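For context on the commit message's point about internal result buffers, the
sketch below is a minimal user-space illustration, not the kernel's
async_xor_zero_sum; the xor_zero_sum helper, BLOCK_SIZE, and the buffer setup
are invented for this example. It shows what a purely software zero-sum check
has to do: read every block and materialize the XOR in memory before it can
report whether parity is consistent. An offload engine can accumulate that XOR
internally and return only the zero/non-zero verdict, which is the behaviour
the removed STRIPE_OP_MOD_DMA_CHECK flag tried to exploit.

/*
 * Minimal user-space sketch (NOT the kernel implementation) of a software
 * xor-zero-sum check.  Every source block is read and the XOR accumulated
 * in an in-memory scratch buffer before the zero/non-zero verdict is known;
 * an offload engine can keep that accumulation in an internal buffer and
 * report only the result.  Names and the 4096-byte block size are
 * illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 4096		/* illustrative stand-in for a stripe block */

/* Return non-zero if the XOR of all source blocks is not all-zeroes. */
static int xor_zero_sum(uint8_t *srcs[], int count, size_t len)
{
	uint8_t scratch[BLOCK_SIZE];	/* the software path must dirty memory */
	uint8_t acc = 0;
	size_t i;
	int d;

	memcpy(scratch, srcs[0], len);
	for (d = 1; d < count; d++)
		for (i = 0; i < len; i++)
			scratch[i] ^= srcs[d][i];
	for (i = 0; i < len; i++)
		acc |= scratch[i];
	return acc != 0;
}

int main(void)
{
	static uint8_t d0[BLOCK_SIZE], d1[BLOCK_SIZE], p[BLOCK_SIZE];
	uint8_t *srcs[] = { p, d0, d1 };
	size_t i;

	memset(d0, 0xaa, sizeof(d0));
	memset(d1, 0x55, sizeof(d1));
	for (i = 0; i < BLOCK_SIZE; i++)
		p[i] = d0[i] ^ d1[i];		/* consistent parity */

	printf("zero_sum_result (good parity): %d\n",
	       xor_zero_sum(srcs, 3, BLOCK_SIZE));

	p[0] ^= 1;				/* corrupt one parity byte */
	printf("zero_sum_result (bad parity):  %d\n",
	       xor_zero_sum(srcs, 3, BLOCK_SIZE));
	return 0;
}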
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	10
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8c4e6149daea..60e61d2464b5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -837,15 +837,10 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 static void ops_complete_check(void *stripe_head_ref)
 {
 	struct stripe_head *sh = stripe_head_ref;
-	int pd_idx = sh->pd_idx;
 
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
-	if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
-	    sh->ops.zero_sum_result == 0)
-		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-
 	set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
@@ -873,11 +868,6 @@ static void ops_run_check(struct stripe_head *sh)
 	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
 		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
 
-	if (tx)
-		set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-	else
-		clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-
 	atomic_inc(&sh->count);
 	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
 		ops_complete_check, sh);