Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/raid5.c  180
1 file changed, 67 insertions(+), 113 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cf60b15b4e3a..63acc51e8406 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2968,63 +2968,14 @@ static int handle_stripe5(struct stripe_head *sh, struct stripe_head_state *s)
 		if (test_bit(R5_ReadError, &dev->flags))
 			clear_bit(R5_Insync, &dev->flags);
 		if (!test_bit(R5_Insync, &dev->flags)) {
+			if (s->failed < 2)
+				s->failed_num[s->failed] = i;
 			s->failed++;
-			s->failed_num[0] = i;
 		}
 	}
 	spin_unlock_irq(&conf->device_lock);
 	rcu_read_unlock();
 
-	if (unlikely(s->blocked_rdev)) {
-		if (s->syncing || s->expanding || s->expanded ||
-		    s->to_write || s->written) {
-			set_bit(STRIPE_HANDLE, &sh->state);
-			return 1;
-		}
-		/* There is nothing for the blocked_rdev to block */
-		rdev_dec_pending(s->blocked_rdev, conf->mddev);
-		s->blocked_rdev = NULL;
-	}
-
-	if (s->to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
-		set_bit(STRIPE_OP_BIOFILL, &s->ops_request);
-		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
-	}
-
-	pr_debug("locked=%d uptodate=%d to_read=%d"
-		" to_write=%d failed=%d failed_num=%d\n",
-		s->locked, s->uptodate, s->to_read, s->to_write,
-		s->failed, s->failed_num[0]);
-	/* check if the array has lost two devices and, if so, some requests might
-	 * need to be failed
-	 */
-	if (s->failed > 1 && s->to_read+s->to_write+s->written)
-		handle_failed_stripe(conf, sh, s, disks, &s->return_bi);
-	if (s->failed > 1 && s->syncing) {
-		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
-		clear_bit(STRIPE_SYNCING, &sh->state);
-		s->syncing = 0;
-	}
-
-	/* might be able to return some write requests if the parity block
-	 * is safe, or on a failed drive
-	 */
-	dev = &sh->dev[sh->pd_idx];
-	if (s->written &&
-	    ((test_bit(R5_Insync, &dev->flags) &&
-	      !test_bit(R5_LOCKED, &dev->flags) &&
-	      test_bit(R5_UPTODATE, &dev->flags)) ||
-	     (s->failed == 1 && s->failed_num[0] == sh->pd_idx)))
-		handle_stripe_clean_event(conf, sh, disks, &s->return_bi);
-
-	/* Now we might consider reading some blocks, either to check/generate
-	 * parity, or to satisfy requests
-	 * or to load a block that is being partially written.
-	 */
-	if (s->to_read || s->non_overwrite ||
-	    (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding)
-		handle_stripe_fill(sh, s, disks);
-
 	return 0;
 }
 
@@ -3032,8 +2983,8 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s)
 {
 	raid5_conf_t *conf = sh->raid_conf;
 	int disks = sh->disks;
-	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
-	struct r5dev *dev, *pdev, *qdev;
+	struct r5dev *dev;
+	int i;
 
 	/* Now to look around and see what can be done */
 
@@ -3107,65 +3058,6 @@ static int handle_stripe6(struct stripe_head *sh, struct stripe_head_state *s)
 	spin_unlock_irq(&conf->device_lock);
 	rcu_read_unlock();
 
-	if (unlikely(s->blocked_rdev)) {
-		if (s->syncing || s->expanding || s->expanded ||
-		    s->to_write || s->written) {
-			set_bit(STRIPE_HANDLE, &sh->state);
-			return 1;
-		}
-		/* There is nothing for the blocked_rdev to block */
-		rdev_dec_pending(s->blocked_rdev, conf->mddev);
-		s->blocked_rdev = NULL;
-	}
-
-	if (s->to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
-		set_bit(STRIPE_OP_BIOFILL, &s->ops_request);
-		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
-	}
-
-	pr_debug("locked=%d uptodate=%d to_read=%d"
-	       " to_write=%d failed=%d failed_num=%d,%d\n",
-	       s->locked, s->uptodate, s->to_read, s->to_write, s->failed,
-	       s->failed_num[0], s->failed_num[1]);
-	/* check if the array has lost >2 devices and, if so, some requests
-	 * might need to be failed
-	 */
-	if (s->failed > 2 && s->to_read+s->to_write+s->written)
-		handle_failed_stripe(conf, sh, s, disks, &s->return_bi);
-	if (s->failed > 2 && s->syncing) {
-		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
-		clear_bit(STRIPE_SYNCING, &sh->state);
-		s->syncing = 0;
-	}
-
-	/*
-	 * might be able to return some write requests if the parity blocks
-	 * are safe, or on a failed drive
-	 */
-	pdev = &sh->dev[pd_idx];
-	s->p_failed = (s->failed >= 1 && s->failed_num[0] == pd_idx)
-		|| (s->failed >= 2 && s->failed_num[1] == pd_idx);
-	qdev = &sh->dev[qd_idx];
-	s->q_failed = (s->failed >= 1 && s->failed_num[0] == qd_idx)
-		|| (s->failed >= 2 && s->failed_num[1] == qd_idx);
-
-	if (s->written &&
-	    (s->p_failed || ((test_bit(R5_Insync, &pdev->flags)
-			     && !test_bit(R5_LOCKED, &pdev->flags)
-			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
-	    (s->q_failed || ((test_bit(R5_Insync, &qdev->flags)
-			     && !test_bit(R5_LOCKED, &qdev->flags)
-			     && test_bit(R5_UPTODATE, &qdev->flags)))))
-		handle_stripe_clean_event(conf, sh, disks, &s->return_bi);
-
-	/* Now we might consider reading some blocks, either to check/generate
-	 * parity, or to satisfy requests
-	 * or to load a block that is being partially written.
-	 */
-	if (s->to_read || s->non_overwrite || (s->to_write && s->failed) ||
-	    (s->syncing && (s->uptodate + s->compute < disks)) || s->expanding)
-		handle_stripe_fill(sh, s, disks);
-
 	return 0;
 }
 
@@ -3173,10 +3065,11 @@ static void handle_stripe(struct stripe_head *sh)
 {
 	struct stripe_head_state s;
 	int done;
+	raid5_conf_t *conf = sh->raid_conf;
 	int i;
 	int prexor;
 	int disks = sh->disks;
-	raid5_conf_t *conf = sh->raid_conf;
+	struct r5dev *pdev, *qdev;
 
 	clear_bit(STRIPE_HANDLE, &sh->state);
 	if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
@@ -3213,6 +3106,67 @@ static void handle_stripe(struct stripe_head *sh)
 	if (done)
 		goto finish;
 
+	if (unlikely(s.blocked_rdev)) {
+		if (s.syncing || s.expanding || s.expanded ||
+		    s.to_write || s.written) {
+			set_bit(STRIPE_HANDLE, &sh->state);
+			goto finish;
+		}
+		/* There is nothing for the blocked_rdev to block */
+		rdev_dec_pending(s.blocked_rdev, conf->mddev);
+		s.blocked_rdev = NULL;
+	}
+
+	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
+		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
+		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
+	}
+
+	pr_debug("locked=%d uptodate=%d to_read=%d"
+	       " to_write=%d failed=%d failed_num=%d,%d\n",
+	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
+	       s.failed_num[0], s.failed_num[1]);
+	/* check if the array has lost more than max_degraded devices and,
+	 * if so, some requests might need to be failed.
+	 */
+	if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
+		handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+	if (s.failed > conf->max_degraded && s.syncing) {
+		md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
+		clear_bit(STRIPE_SYNCING, &sh->state);
+		s.syncing = 0;
+	}
+
+	/*
+	 * might be able to return some write requests if the parity blocks
+	 * are safe, or on a failed drive
+	 */
+	pdev = &sh->dev[sh->pd_idx];
+	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+	qdev = &sh->dev[sh->qd_idx];
+	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+		|| conf->level < 6;
+
+	if (s.written &&
+	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+			     && !test_bit(R5_LOCKED, &pdev->flags)
+			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
+	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+			     && !test_bit(R5_LOCKED, &qdev->flags)
+			     && test_bit(R5_UPTODATE, &qdev->flags)))))
+		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+
+	/* Now we might consider reading some blocks, either to check/generate
+	 * parity, or to satisfy requests
+	 * or to load a block that is being partially written.
+	 */
+	if (s.to_read || s.non_overwrite
+	    || (conf->level == 6 && s.to_write && s.failed)
+	    || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+		handle_stripe_fill(sh, &s, disks);
+
 	/* Now we check to see if any write operations have recently
 	 * completed
 	 */