author     Dan Williams <dan.j.williams@intel.com>    2008-06-27 18:32:03 -0400
committer  Neil Brown <neilb@notabene.brown>          2008-06-27 18:32:03 -0400
commit     976ea8d475675da6e86bd434328814ccbf5ae641 (patch)
tree       087b3c8a44b6cfce0ff085faab0bc66871c455fb /drivers/md/raid5.c
parent     83de75cc92be599850e5ef3928e07cd840833499 (diff)
md: replace STRIPE_OP_COMPUTE_BLK with STRIPE_COMPUTE_RUN
From: Dan Williams <dan.j.williams@intel.com>
Track the state of compute operations (recalculating a block from all the
other blocks in a stripe) with a state flag. This reduces the scope of the
STRIPE_OP_COMPUTE_BLK flag to only tracking whether a compute operation has
been requested, via the ops_request field of struct stripe_head_state.

Note, the compute operation that is performed in the course of doing a
'repair' operation (check the parity block, recalculate it and write it back
if the check result is not zero) is tracked separately with the 'check_state'
variable. Compute operations are held off while a 'check' is in progress; by
moving this check out to handle_issuing_new_read_requests5, the helper routine
__handle_issuing_new_read_requests5 can be simplified.

This is another step towards the removal of ops.{pending,ack,complete,count},
i.e. STRIPE_OP_COMPUTE_BLK only requests an operation and does not track the
state of the operation.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Neil Brown <neilb@suse.de>
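As a reading aid, below is a minimal, hypothetical userspace model of the request/run split described above; the structs and helper names (request_compute, run_ops, complete_compute) are simplified stand-ins, not the kernel code. The point it illustrates: STRIPE_OP_COMPUTE_BLK in ops_request only asks for a compute to be started, while STRIPE_COMPUTE_RUN in sh->state marks that one is in flight until the completion path clears it.

/*
 * Hypothetical userspace sketch of the request/run split -- not kernel code.
 */
#include <stdio.h>

#define STRIPE_OP_COMPUTE_BLK  (1u << 0)   /* request bit, per handle_stripe pass */
#define STRIPE_COMPUTE_RUN     (1u << 1)   /* in-flight bit, lives in stripe state */

struct stripe_head_state { unsigned long ops_request; };
struct stripe_head       { unsigned long state; };

/* handle-side: request a compute and mark the stripe as having one in flight */
static void request_compute(struct stripe_head *sh, struct stripe_head_state *s)
{
	sh->state      |= STRIPE_COMPUTE_RUN;
	s->ops_request |= STRIPE_OP_COMPUTE_BLK;
}

/* raid5_run_ops analogue: acts only on the request bit */
static void run_ops(struct stripe_head *sh, struct stripe_head_state *s)
{
	if (s->ops_request & STRIPE_OP_COMPUTE_BLK)
		printf("compute launched (stripe state=%#lx)\n", sh->state);
}

/* ops_complete_compute5 analogue: clears the in-flight bit on completion */
static void complete_compute(struct stripe_head *sh)
{
	sh->state &= ~STRIPE_COMPUTE_RUN;
}

int main(void)
{
	struct stripe_head sh = { 0 };
	struct stripe_head_state s = { 0 };

	request_compute(&sh, &s);
	run_ops(&sh, &s);
	complete_compute(&sh);
	printf("compute done (stripe state=%#lx)\n", sh.state);
	return 0;
}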
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c | 65
1 file changed, 18 insertions(+), 47 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b9c0a32a4f95..835046bf384e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -604,8 +604,6 @@ static void ops_complete_compute5(void *stripe_head_ref)
 	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
 	if (sh->check_state == check_state_compute_run)
 		sh->check_state = check_state_compute_result;
-	else
-		set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
@@ -881,8 +879,7 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending,
 			overlap_clear++;
 	}
 
-	if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending) ||
-	    test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request))
+	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request))
 		tx = ops_run_compute5(sh, pending);
 
 	if (test_bit(STRIPE_OP_PREXOR, &pending))
@@ -1960,12 +1957,6 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
 	struct r5dev *dev = &sh->dev[disk_idx];
 	struct r5dev *failed_dev = &sh->dev[s->failed_num];
 
-	/* don't schedule compute operations or reads on the parity block while
-	 * a check is in flight
-	 */
-	if (disk_idx == sh->pd_idx && sh->check_state)
-		return ~0;
-
 	/* is the data in this block needed, and can we get it? */
 	if (!test_bit(R5_LOCKED, &dev->flags) &&
 	    !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
@@ -1974,23 +1965,16 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
 	     (failed_dev->toread || (failed_dev->towrite &&
 	     !test_bit(R5_OVERWRITE, &failed_dev->flags)
 	     ))))) {
-		/* 1/ We would like to get this block, possibly by computing it,
-		 * but we might not be able to.
-		 *
-		 * 2/ Since parity check operations potentially make the parity
-		 * block !uptodate it will need to be refreshed before any
-		 * compute operations on data disks are scheduled.
-		 *
-		 * 3/ We hold off parity block re-reads until check operations
-		 * have quiesced.
+		/* We would like to get this block, possibly by computing it,
+		 * otherwise read it if the backing disk is insync
 		 */
-		if ((s->uptodate == disks - 1) && !sh->check_state &&
+		if ((s->uptodate == disks - 1) &&
 		    (s->failed && disk_idx == s->failed_num)) {
-			set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
 			set_bit(R5_Wantcompute, &dev->flags);
 			sh->ops.target = disk_idx;
 			s->req_compute = 1;
-			sh->ops.count++;
 			/* Careful: from this point on 'uptodate' is in the eye
 			 * of raid5_run_ops which services 'compute' operations
 			 * before writes. R5_Wantcompute flags a block that will
@@ -1999,12 +1983,7 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
 			 */
 			s->uptodate++;
 			return 0; /* uptodate + compute == disks */
-		} else if ((s->uptodate < disks - 1) &&
-			test_bit(R5_Insync, &dev->flags)) {
-			/* Note: we hold off compute operations while checks are
-			 * in flight, but we still prefer 'compute' over 'read'
-			 * hence we only read if (uptodate < * disks-1)
-			 */
+		} else if (test_bit(R5_Insync, &dev->flags)) {
 			set_bit(R5_LOCKED, &dev->flags);
 			set_bit(R5_Wantread, &dev->flags);
 			s->locked++;
@@ -2021,20 +2000,13 @@ static void handle_issuing_new_read_requests5(struct stripe_head *sh,
 {
 	int i;
 
-	/* Clear completed compute operations */
-	if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete)) {
-		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
-		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
-		clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
-	}
-
 	/* look for blocks to read/compute, skip this if a compute
 	 * is already in flight, or if the stripe contents are in the
 	 * midst of changing due to a write
 	 */
-	if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
 	    !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
 	    !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
 		for (i = disks; i--; )
 			if (__handle_issuing_new_read_requests5(
 				sh, s, i, disks) == 0)
@@ -2236,10 +2208,9 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
 	 * simultaneously. If this is not the case then new writes need to be
 	 * held off until the compute completes.
 	 */
-	if ((s->req_compute ||
-	    !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
-	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
-	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
+	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
+	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
 		s->locked += handle_write_operations5(sh, rcw == 0, 0);
 }
 
@@ -2410,6 +2381,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
 			set_bit(STRIPE_INSYNC, &sh->state);
 		else {
 			sh->check_state = check_state_compute_run;
+			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
 			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
 			set_bit(R5_Wantcompute,
 				&sh->dev[sh->pd_idx].flags);
@@ -2725,8 +2697,7 @@ static void handle_stripe5(struct stripe_head *sh)
 	 * or to load a block that is being partially written.
 	 */
 	if (s.to_read || s.non_overwrite ||
-	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
-	    test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
 		handle_issuing_new_read_requests5(sh, &s, disks);
 
 	/* Now we check to see if any write operations have recently
@@ -2803,7 +2774,7 @@ static void handle_stripe5(struct stripe_head *sh)
 	 */
 	if (sh->check_state ||
 	    (s.syncing && s.locked == 0 &&
-	     !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
 	     !test_bit(STRIPE_INSYNC, &sh->state)))
 		handle_parity_checks5(conf, sh, &s, disks);
 
@@ -2869,7 +2840,7 @@ static void handle_stripe5(struct stripe_head *sh)
 	}
 
 	if (s.expanding && s.locked == 0 &&
-	    !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
 		handle_stripe_expansion(conf, sh, NULL);
 
 	if (sh->ops.count)
@@ -3089,7 +3060,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 	}
 
 	if (s.expanding && s.locked == 0 &&
-	    !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
 		handle_stripe_expansion(conf, sh, &r6s);
 
  unlock: