aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2008-06-27 18:31:52 -0400
committerNeil Brown <neilb@notabene.brown>2008-06-27 18:31:52 -0400
commit2b7497f0e0a0b9cf21d822e427d5399b2056501a (patch)
tree8cf4d8e056ddafe48d49af0d8afe600868d2d21b /drivers/md
parentb203886edbcaac3ca427cf4dbcb50b18bdb346fd (diff)
md: kill STRIPE_OP_IO flag
From: Dan Williams <dan.j.williams@intel.com> The R5_Want{Read,Write} flags already gate i/o. So, this flag is superfluous and we can unconditionally call ops_run_io(). Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Neil Brown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/raid5.c32
1 file changed, 5 insertions, 27 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 60e61d2464b5..cac97080b278 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -373,8 +373,6 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
373 test_and_ack_op(STRIPE_OP_BIODRAIN, pending); 373 test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
374 test_and_ack_op(STRIPE_OP_POSTXOR, pending); 374 test_and_ack_op(STRIPE_OP_POSTXOR, pending);
375 test_and_ack_op(STRIPE_OP_CHECK, pending); 375 test_and_ack_op(STRIPE_OP_CHECK, pending);
376 if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
377 ack++;
378 376
379 sh->ops.count -= ack; 377 sh->ops.count -= ack;
380 if (unlikely(sh->ops.count < 0)) { 378 if (unlikely(sh->ops.count < 0)) {
@@ -399,7 +397,6 @@ static void ops_run_io(struct stripe_head *sh)
399 397
400 might_sleep(); 398 might_sleep();
401 399
402 set_bit(STRIPE_IO_STARTED, &sh->state);
403 for (i = disks; i--; ) { 400 for (i = disks; i--; ) {
404 int rw; 401 int rw;
405 struct bio *bi; 402 struct bio *bi;
@@ -433,6 +430,8 @@ static void ops_run_io(struct stripe_head *sh)
433 test_bit(STRIPE_EXPAND_READY, &sh->state)) 430 test_bit(STRIPE_EXPAND_READY, &sh->state))
434 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 431 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
435 432
433 set_bit(STRIPE_IO_STARTED, &sh->state);
434
436 bi->bi_bdev = rdev->bdev; 435 bi->bi_bdev = rdev->bdev;
437 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 436 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
438 __func__, (unsigned long long)sh->sector, 437 __func__, (unsigned long long)sh->sector,
@@ -900,9 +899,6 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
900 if (test_bit(STRIPE_OP_CHECK, &pending)) 899 if (test_bit(STRIPE_OP_CHECK, &pending))
901 ops_run_check(sh); 900 ops_run_check(sh);
902 901
903 if (test_bit(STRIPE_OP_IO, &pending))
904 ops_run_io(sh);
905
906 if (overlap_clear) 902 if (overlap_clear)
907 for (i = disks; i--; ) { 903 for (i = disks; i--; ) {
908 struct r5dev *dev = &sh->dev[i]; 904 struct r5dev *dev = &sh->dev[i];
@@ -2013,8 +2009,6 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
2013 */ 2009 */
2014 set_bit(R5_LOCKED, &dev->flags); 2010 set_bit(R5_LOCKED, &dev->flags);
2015 set_bit(R5_Wantread, &dev->flags); 2011 set_bit(R5_Wantread, &dev->flags);
2016 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2017 sh->ops.count++;
2018 s->locked++; 2012 s->locked++;
2019 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2013 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2020 s->syncing); 2014 s->syncing);
@@ -2208,9 +2202,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
2208 "%d for r-m-w\n", i); 2202 "%d for r-m-w\n", i);
2209 set_bit(R5_LOCKED, &dev->flags); 2203 set_bit(R5_LOCKED, &dev->flags);
2210 set_bit(R5_Wantread, &dev->flags); 2204 set_bit(R5_Wantread, &dev->flags);
2211 if (!test_and_set_bit(
2212 STRIPE_OP_IO, &sh->ops.pending))
2213 sh->ops.count++;
2214 s->locked++; 2205 s->locked++;
2215 } else { 2206 } else {
2216 set_bit(STRIPE_DELAYED, &sh->state); 2207 set_bit(STRIPE_DELAYED, &sh->state);
@@ -2234,9 +2225,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
2234 "%d for Reconstruct\n", i); 2225 "%d for Reconstruct\n", i);
2235 set_bit(R5_LOCKED, &dev->flags); 2226 set_bit(R5_LOCKED, &dev->flags);
2236 set_bit(R5_Wantread, &dev->flags); 2227 set_bit(R5_Wantread, &dev->flags);
2237 if (!test_and_set_bit(
2238 STRIPE_OP_IO, &sh->ops.pending))
2239 sh->ops.count++;
2240 s->locked++; 2228 s->locked++;
2241 } else { 2229 } else {
2242 set_bit(STRIPE_DELAYED, &sh->state); 2230 set_bit(STRIPE_DELAYED, &sh->state);
@@ -2444,8 +2432,6 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2444 2432
2445 set_bit(R5_LOCKED, &dev->flags); 2433 set_bit(R5_LOCKED, &dev->flags);
2446 set_bit(R5_Wantwrite, &dev->flags); 2434 set_bit(R5_Wantwrite, &dev->flags);
2447 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2448 sh->ops.count++;
2449 2435
2450 clear_bit(STRIPE_DEGRADED, &sh->state); 2436 clear_bit(STRIPE_DEGRADED, &sh->state);
2451 s->locked++; 2437 s->locked++;
@@ -2801,9 +2787,6 @@ static void handle_stripe5(struct stripe_head *sh)
2801 (i == sh->pd_idx || dev->written)) { 2787 (i == sh->pd_idx || dev->written)) {
2802 pr_debug("Writing block %d\n", i); 2788 pr_debug("Writing block %d\n", i);
2803 set_bit(R5_Wantwrite, &dev->flags); 2789 set_bit(R5_Wantwrite, &dev->flags);
2804 if (!test_and_set_bit(
2805 STRIPE_OP_IO, &sh->ops.pending))
2806 sh->ops.count++;
2807 if (prexor) 2790 if (prexor)
2808 continue; 2791 continue;
2809 if (!test_bit(R5_Insync, &dev->flags) || 2792 if (!test_bit(R5_Insync, &dev->flags) ||
@@ -2857,16 +2840,12 @@ static void handle_stripe5(struct stripe_head *sh)
2857 dev = &sh->dev[s.failed_num]; 2840 dev = &sh->dev[s.failed_num];
2858 if (!test_bit(R5_ReWrite, &dev->flags)) { 2841 if (!test_bit(R5_ReWrite, &dev->flags)) {
2859 set_bit(R5_Wantwrite, &dev->flags); 2842 set_bit(R5_Wantwrite, &dev->flags);
2860 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2861 sh->ops.count++;
2862 set_bit(R5_ReWrite, &dev->flags); 2843 set_bit(R5_ReWrite, &dev->flags);
2863 set_bit(R5_LOCKED, &dev->flags); 2844 set_bit(R5_LOCKED, &dev->flags);
2864 s.locked++; 2845 s.locked++;
2865 } else { 2846 } else {
2866 /* let's read it back */ 2847 /* let's read it back */
2867 set_bit(R5_Wantread, &dev->flags); 2848 set_bit(R5_Wantread, &dev->flags);
2868 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2869 sh->ops.count++;
2870 set_bit(R5_LOCKED, &dev->flags); 2849 set_bit(R5_LOCKED, &dev->flags);
2871 s.locked++; 2850 s.locked++;
2872 } 2851 }
@@ -2884,13 +2863,10 @@ static void handle_stripe5(struct stripe_head *sh)
2884 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2863 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
2885 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2864 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
2886 2865
2887 for (i = conf->raid_disks; i--; ) { 2866 for (i = conf->raid_disks; i--; )
2888 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2867 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2889 set_bit(R5_LOCKED, &dev->flags); 2868 set_bit(R5_LOCKED, &dev->flags);
2890 s.locked++; 2869 s.locked++;
2891 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2892 sh->ops.count++;
2893 }
2894 } 2870 }
2895 2871
2896 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2872 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
@@ -2926,6 +2902,8 @@ static void handle_stripe5(struct stripe_head *sh)
2926 if (pending) 2902 if (pending)
2927 raid5_run_ops(sh, pending); 2903 raid5_run_ops(sh, pending);
2928 2904
2905 ops_run_io(sh);
2906
2929 return_io(return_bi); 2907 return_io(return_bi);
2930 2908
2931} 2909}