aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2007-01-02 15:52:31 -0500
committerDan Williams <dan.j.williams@intel.com>2007-07-13 11:06:17 -0400
commit830ea01673a397798d1281d2022615559f5001bb (patch)
tree33413032374605648a1e47f059b93ffc39e33717 /drivers/md/raid5.c
parentf0a50d3754c7f1b7f05f45b1c0b35d20445316b5 (diff)
md: handle_stripe5 - request io processing in raid5_run_ops
I/O submission requests were already handled outside of the stripe lock in handle_stripe. Now that handle_stripe is only tasked with finding work, this logic belongs in raid5_run_ops.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c71
1 file changed, 13 insertions(+), 58 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c6e0e2b26f60..7e1cc07f3177 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2326,6 +2326,9 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
2326 "%d for r-m-w\n", i); 2326 "%d for r-m-w\n", i);
2327 set_bit(R5_LOCKED, &dev->flags); 2327 set_bit(R5_LOCKED, &dev->flags);
2328 set_bit(R5_Wantread, &dev->flags); 2328 set_bit(R5_Wantread, &dev->flags);
2329 if (!test_and_set_bit(
2330 STRIPE_OP_IO, &sh->ops.pending))
2331 sh->ops.count++;
2329 s->locked++; 2332 s->locked++;
2330 } else { 2333 } else {
2331 set_bit(STRIPE_DELAYED, &sh->state); 2334 set_bit(STRIPE_DELAYED, &sh->state);
@@ -2349,6 +2352,9 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
2349 "%d for Reconstruct\n", i); 2352 "%d for Reconstruct\n", i);
2350 set_bit(R5_LOCKED, &dev->flags); 2353 set_bit(R5_LOCKED, &dev->flags);
2351 set_bit(R5_Wantread, &dev->flags); 2354 set_bit(R5_Wantread, &dev->flags);
2355 if (!test_and_set_bit(
2356 STRIPE_OP_IO, &sh->ops.pending))
2357 sh->ops.count++;
2352 s->locked++; 2358 s->locked++;
2353 } else { 2359 } else {
2354 set_bit(STRIPE_DELAYED, &sh->state); 2360 set_bit(STRIPE_DELAYED, &sh->state);
@@ -2545,6 +2551,9 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2545 2551
2546 set_bit(R5_LOCKED, &dev->flags); 2552 set_bit(R5_LOCKED, &dev->flags);
2547 set_bit(R5_Wantwrite, &dev->flags); 2553 set_bit(R5_Wantwrite, &dev->flags);
2554 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2555 sh->ops.count++;
2556
2548 clear_bit(STRIPE_DEGRADED, &sh->state); 2557 clear_bit(STRIPE_DEGRADED, &sh->state);
2549 s->locked++; 2558 s->locked++;
2550 set_bit(STRIPE_INSYNC, &sh->state); 2559 set_bit(STRIPE_INSYNC, &sh->state);
@@ -2930,12 +2939,16 @@ static void handle_stripe5(struct stripe_head *sh)
2930 dev = &sh->dev[s.failed_num]; 2939 dev = &sh->dev[s.failed_num];
2931 if (!test_bit(R5_ReWrite, &dev->flags)) { 2940 if (!test_bit(R5_ReWrite, &dev->flags)) {
2932 set_bit(R5_Wantwrite, &dev->flags); 2941 set_bit(R5_Wantwrite, &dev->flags);
2942 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2943 sh->ops.count++;
2933 set_bit(R5_ReWrite, &dev->flags); 2944 set_bit(R5_ReWrite, &dev->flags);
2934 set_bit(R5_LOCKED, &dev->flags); 2945 set_bit(R5_LOCKED, &dev->flags);
2935 s.locked++; 2946 s.locked++;
2936 } else { 2947 } else {
2937 /* let's read it back */ 2948 /* let's read it back */
2938 set_bit(R5_Wantread, &dev->flags); 2949 set_bit(R5_Wantread, &dev->flags);
2950 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
2951 sh->ops.count++;
2939 set_bit(R5_LOCKED, &dev->flags); 2952 set_bit(R5_LOCKED, &dev->flags);
2940 s.locked++; 2953 s.locked++;
2941 } 2954 }
@@ -2988,64 +3001,6 @@ static void handle_stripe5(struct stripe_head *sh)
2988 3001
2989 return_io(return_bi); 3002 return_io(return_bi);
2990 3003
2991 for (i=disks; i-- ;) {
2992 int rw;
2993 struct bio *bi;
2994 mdk_rdev_t *rdev;
2995 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
2996 rw = WRITE;
2997 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
2998 rw = READ;
2999 else
3000 continue;
3001
3002 bi = &sh->dev[i].req;
3003
3004 bi->bi_rw = rw;
3005 if (rw == WRITE)
3006 bi->bi_end_io = raid5_end_write_request;
3007 else
3008 bi->bi_end_io = raid5_end_read_request;
3009
3010 rcu_read_lock();
3011 rdev = rcu_dereference(conf->disks[i].rdev);
3012 if (rdev && test_bit(Faulty, &rdev->flags))
3013 rdev = NULL;
3014 if (rdev)
3015 atomic_inc(&rdev->nr_pending);
3016 rcu_read_unlock();
3017
3018 if (rdev) {
3019 if (s.syncing || s.expanding || s.expanded)
3020 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
3021
3022 bi->bi_bdev = rdev->bdev;
3023 pr_debug("for %llu schedule op %ld on disc %d\n",
3024 (unsigned long long)sh->sector, bi->bi_rw, i);
3025 atomic_inc(&sh->count);
3026 bi->bi_sector = sh->sector + rdev->data_offset;
3027 bi->bi_flags = 1 << BIO_UPTODATE;
3028 bi->bi_vcnt = 1;
3029 bi->bi_max_vecs = 1;
3030 bi->bi_idx = 0;
3031 bi->bi_io_vec = &sh->dev[i].vec;
3032 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
3033 bi->bi_io_vec[0].bv_offset = 0;
3034 bi->bi_size = STRIPE_SIZE;
3035 bi->bi_next = NULL;
3036 if (rw == WRITE &&
3037 test_bit(R5_ReWrite, &sh->dev[i].flags))
3038 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
3039 generic_make_request(bi);
3040 } else {
3041 if (rw == WRITE)
3042 set_bit(STRIPE_DEGRADED, &sh->state);
3043 pr_debug("skip op %ld on disc %d for sector %llu\n",
3044 bi->bi_rw, i, (unsigned long long)sh->sector);
3045 clear_bit(R5_LOCKED, &sh->dev[i].flags);
3046 set_bit(STRIPE_HANDLE, &sh->state);
3047 }
3048 }
3049} 3004}
3050 3005
3051static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 3006static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)