author     Dan Williams <dan.j.williams@intel.com>    2007-01-02 15:52:31 -0500
committer  Dan Williams <dan.j.williams@intel.com>    2007-07-13 11:06:17 -0400
commit     f0a50d3754c7f1b7f05f45b1c0b35d20445316b5
tree       c54b572ad4c4c9b48f887a9ecc28ec7b6166d552 /drivers/md
parent     b5e98d65d34a1c11a2135ea8a9b2619dbc7216c8
md: handle_stripe5 - add request/completion logic for async expand ops
When a stripe is being expanded, bulk copying takes place to move the data from the old stripe to the new. Since raid5_run_ops only operates on one stripe at a time, these bulk copies are handled in-line under the stripe lock. In the dma offload case we poll for the completion of the operation.

After the data has been copied into the new stripe, the parity needs to be recalculated across the new disks. We reuse the existing postxor functionality to carry out this calculation. By setting STRIPE_OP_POSTXOR without setting STRIPE_OP_BIODRAIN, the completion path in handle_stripe can differentiate expand operations from normal write operations.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: NeilBrown <neilb@suse.de>
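The submit-then-wait pattern described above looks like this in isolation. This is a minimal sketch, not code from the patch: copy_pages_async(), dst[], src[] and count are hypothetical stand-ins for the per-device walk in handle_stripe_expansion(), while async_memcpy(), async_tx_ack() and dma_wait_for_async_tx() are the async_tx calls the patch itself uses.

#include <linux/async_tx.h>
#include <linux/mm.h>

/* Hypothetical helper: submit one copy per page pair, chained on a
 * single channel, then poll once for the whole chain to complete.
 */
static void copy_pages_async(struct page **dst, struct page **src,
			     int count)
{
	struct dma_async_tx_descriptor *tx = NULL;
	int i;

	for (i = 0; i < count; i++)
		/* each descriptor depends on the previous one, so all
		 * of the copies land on one channel in order */
		tx = async_memcpy(dst[i], src[i], 0, 0, PAGE_SIZE,
				  ASYNC_TX_DEP_ACK, tx, NULL, NULL);

	if (tx) {
		/* done submitting; ack the final descriptor and poll
		 * until the chain completes */
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}

In the patch itself the chain is built inside the existing loop over stripe devices, so the final-descriptor wait is keyed off the i + 1 >= sh->disks test rather than falling out of the loop.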
Diffstat (limited to 'drivers/md')
 drivers/md/raid5.c | 50 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a33dac7c2e2f..c6e0e2b26f60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2653,6 +2653,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 	/* We have read all the blocks in this stripe and now we need to
 	 * copy some of them into a target stripe for expand.
 	 */
+	struct dma_async_tx_descriptor *tx = NULL;
 	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
 	for (i = 0; i < sh->disks; i++)
 		if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
@@ -2678,9 +2679,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 				release_stripe(sh2);
 				continue;
 			}
-			memcpy(page_address(sh2->dev[dd_idx].page),
-			       page_address(sh->dev[i].page),
-			       STRIPE_SIZE);
+
+			/* place all the copies on one channel */
+			tx = async_memcpy(sh2->dev[dd_idx].page,
+				sh->dev[i].page, 0, 0, STRIPE_SIZE,
+				ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+
 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
 			for (j = 0; j < conf->raid_disks; j++)
@@ -2693,6 +2697,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 				set_bit(STRIPE_HANDLE, &sh2->state);
 			}
 			release_stripe(sh2);
+
+			/* done submitting copies, wait for them to complete */
+			if (i + 1 >= sh->disks) {
+				async_tx_ack(tx);
+				dma_wait_for_async_tx(tx);
+			}
 		}
 }
 
@@ -2931,18 +2941,34 @@ static void handle_stripe5(struct stripe_head *sh)
 		}
 	}
 
-	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
-		/* Need to write out all blocks after computing parity */
-		sh->disks = conf->raid_disks;
-		sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
-		compute_parity5(sh, RECONSTRUCT_WRITE);
+	/* Finish postxor operations initiated by the expansion
+	 * process
+	 */
+	if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
+		!test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
+
+		clear_bit(STRIPE_EXPANDING, &sh->state);
+
+		clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+		clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
+		clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+
 		for (i = conf->raid_disks; i--; ) {
-			set_bit(R5_LOCKED, &sh->dev[i].flags);
-			s.locked++;
 			set_bit(R5_Wantwrite, &sh->dev[i].flags);
+			if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+				sh->ops.count++;
 		}
-		clear_bit(STRIPE_EXPANDING, &sh->state);
-	} else if (s.expanded) {
+	}
+
+	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
+		!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+		/* Need to write out all blocks after computing parity */
+		sh->disks = conf->raid_disks;
+		sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
+			conf->raid_disks);
+		s.locked += handle_write_operations5(sh, 0, 1);
+	} else if (s.expanded &&
+		!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
 		atomic_dec(&conf->reshape_stripes);
 		wake_up(&conf->wait_for_overlap);
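The discrimination the changelog describes comes down to flag state at completion time: normal writes raise STRIPE_OP_BIODRAIN together with STRIPE_OP_POSTXOR, while the expand path raises only the latter. A minimal sketch of that check, using just the flag tests from the hunk above:

	/* A completed POSTXOR with no BIODRAIN pending can only have
	 * come from the expansion path; normal writes set both flags.
	 */
	if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
	    !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
		/* expand completion: request writeback of every block,
		 * with no bios to drain or return */
	}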