author		Dan Williams <dan.j.williams@intel.com>		2007-01-02 15:52:31 -0500
committer	Dan Williams <dan.j.williams@intel.com>		2007-07-13 11:06:17 -0400
commit		b5e98d65d34a1c11a2135ea8a9b2619dbc7216c8 (patch)
tree		bf88aad375bb0cbf6c346ec912c06c2607850b21 /drivers/md
parent		e89f89629b5de76e504d1be75c82c4a6b2419583 (diff)
md: handle_stripe5 - add request/completion logic for async read ops
When a read bio is attached to the stripe and the corresponding block is
marked R5_UPTODATE, then a read (biofill) operation is scheduled to copy
the data from the stripe cache to the bio buffer. handle_stripe flags the
blocks to be operated on with the R5_Wantfill flag. If new read requests
arrive while raid5_run_ops is running they will not be handled until
handle_stripe is scheduled to run again.

Changelog:
* cleanup to_read and to_fill accounting
* do not fail reads that have reached the cache

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-By: NeilBrown <neilb@suse.de>
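The request/accounting flow described above can be illustrated with a small
userspace simulation. This is a minimal sketch, not kernel code: the struct
layout, the disk count, and the handle_stripe_sim() helper are invented for
illustration, and flags are modeled as plain bit masks rather than test_bit()
bit numbers; only the flag names and the gating/accounting rules mirror the
patch.

/* Userspace sketch of the request-side biofill accounting.
 * All names except the R5_ and STRIPE_OP_ flags are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define NDISKS 4

enum { R5_UPTODATE = 1 << 0, R5_Wantfill = 1 << 1 };
enum { STRIPE_OP_BIOFILL = 1 << 0 };

struct dev_sim { unsigned long flags; bool toread; };
struct stripe_sim {
	struct dev_sim dev[NDISKS];
	unsigned long ops_pending;
	int ops_count;
};

static void handle_stripe_sim(struct stripe_sim *sh)
{
	int to_fill = 0, to_read = 0;

	for (int i = 0; i < NDISKS; i++) {
		struct dev_sim *dev = &sh->dev[i];

		/* new wantfill requests are only permitted while
		 * STRIPE_OP_BIOFILL is clear */
		if ((dev->flags & R5_UPTODATE) && dev->toread &&
		    !(sh->ops_pending & STRIPE_OP_BIOFILL))
			dev->flags |= R5_Wantfill;

		/* the accounting cleanup from the changelog: a block
		 * queued for biofill counts as to_fill, not to_read */
		if (dev->flags & R5_Wantfill)
			to_fill++;
		else if (dev->toread)
			to_read++;
	}

	/* take the biofill operation exactly once per batch */
	if (to_fill && !(sh->ops_pending & STRIPE_OP_BIOFILL)) {
		sh->ops_pending |= STRIPE_OP_BIOFILL;
		sh->ops_count++;
	}

	printf("to_fill=%d to_read=%d ops_count=%d\n",
	       to_fill, to_read, sh->ops_count);
}

int main(void)
{
	struct stripe_sim sh = { 0 };

	sh.dev[0] = (struct dev_sim){ .flags = R5_UPTODATE, .toread = true };
	sh.dev[1] = (struct dev_sim){ .flags = 0, .toread = true };
	handle_stripe_sim(&sh);	/* dev 0 -> to_fill, dev 1 -> to_read */

	/* dev 1's data arrives while the biofill op is still pending:
	 * it stays in to_read until the next handle_stripe pass */
	sh.dev[1].flags |= R5_UPTODATE;
	handle_stripe_sim(&sh);
	return 0;
}

The second pass shows the behaviour called out in the message: a read that
becomes serviceable while STRIPE_OP_BIOFILL is pending stays counted as
to_read until handle_stripe runs again.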
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid5.c	53
1 file changed, 25 insertions(+), 28 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 810cf831edda..a33dac7c2e2f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2049,9 +2049,12 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
 			bi = bi2;
 		}
 
-		/* fail any reads if this device is non-operational */
-		if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
-		    test_bit(R5_ReadError, &sh->dev[i].flags)) {
+		/* fail any reads if this device is non-operational and
+		 * the data has not reached the cache yet.
+		 */
+		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
+		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
 			bi = sh->dev[i].toread;
 			sh->dev[i].toread = NULL;
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
@@ -2740,37 +2743,27 @@ static void handle_stripe5(struct stripe_head *sh)
 		struct r5dev *dev = &sh->dev[i];
 		clear_bit(R5_Insync, &dev->flags);
 
-		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
-			i, dev->flags, dev->toread, dev->towrite, dev->written);
-		/* maybe we can reply to a read */
-		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
-			struct bio *rbi, *rbi2;
-			pr_debug("Return read for disc %d\n", i);
-			spin_lock_irq(&conf->device_lock);
-			rbi = dev->toread;
-			dev->toread = NULL;
-			if (test_and_clear_bit(R5_Overlap, &dev->flags))
-				wake_up(&conf->wait_for_overlap);
-			spin_unlock_irq(&conf->device_lock);
-			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-				copy_data(0, rbi, dev->page, dev->sector);
-				rbi2 = r5_next_bio(rbi, dev->sector);
-				spin_lock_irq(&conf->device_lock);
-				if (--rbi->bi_phys_segments == 0) {
-					rbi->bi_next = return_bi;
-					return_bi = rbi;
-				}
-				spin_unlock_irq(&conf->device_lock);
-				rbi = rbi2;
-			}
-		}
+		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
+			"written %p\n", i, dev->flags, dev->toread, dev->read,
+			dev->towrite, dev->written);
+
+		/* maybe we can request a biofill operation
+		 *
+		 * new wantfill requests are only permitted while
+		 * STRIPE_OP_BIOFILL is clear
+		 */
+		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
+		    !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+			set_bit(R5_Wantfill, &dev->flags);
 
 		/* now count some things */
 		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
 		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
 		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
 
-		if (dev->toread)
+		if (test_bit(R5_Wantfill, &dev->flags))
+			s.to_fill++;
+		else if (dev->toread)
 			s.to_read++;
 		if (dev->towrite) {
 			s.to_write++;
@@ -2793,6 +2786,10 @@ static void handle_stripe5(struct stripe_head *sh)
 			set_bit(R5_Insync, &dev->flags);
 		}
 	rcu_read_unlock();
+
+	if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+		sh->ops.count++;
+
 	pr_debug("locked=%d uptodate=%d to_read=%d"
 		" to_write=%d failed=%d failed_num=%d\n",
 		s.locked, s.uptodate, s.to_read, s.to_write,
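The first hunk's changelog item ("do not fail reads that have reached the
cache") reduces to a single predicate on the device flags. A minimal sketch
under the same caveats as the simulation above (masks instead of bit numbers;
should_fail_read() is a hypothetical helper, not a function in raid5.c):

#include <assert.h>
#include <stdbool.h>

enum { R5_Insync = 1 << 0, R5_ReadError = 1 << 1, R5_Wantfill = 1 << 2 };

/* hypothetical distillation of the new condition in the first hunk */
static bool should_fail_read(unsigned long flags)
{
	/* a block already marked for biofill has valid data in the
	 * stripe cache, so its reads can complete even when the
	 * backing device is non-operational */
	if (flags & R5_Wantfill)
		return false;

	return !(flags & R5_Insync) || (flags & R5_ReadError);
}

int main(void)
{
	assert(should_fail_read(0));                        /* device out of sync */
	assert(should_fail_read(R5_Insync | R5_ReadError)); /* read error */
	assert(!should_fail_read(R5_Wantfill));             /* data cached: keep */
	return 0;
}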