Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-ioctl.c   |  6
-rw-r--r--  drivers/md/dm-mpath.c   | 15
-rw-r--r--  drivers/md/dm-stripe.c  |  4
-rw-r--r--  drivers/md/dm.c         | 40
-rw-r--r--  drivers/md/linear.c     | 10
-rw-r--r--  drivers/md/md.c         | 15
-rw-r--r--  drivers/md/multipath.c  |  8
-rw-r--r--  drivers/md/raid0.c      | 10
-rw-r--r--  drivers/md/raid1.c      | 13
-rw-r--r--  drivers/md/raid10.c     | 14
-rw-r--r--  drivers/md/raid5.c      | 75
11 files changed, 138 insertions(+), 72 deletions(-)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 90e19f13f26..dca401dc70a 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
 old_nl->next = (uint32_t) ((void *) nl -
 (void *) old_nl);
 disk = dm_disk(hc->md);
-nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+nl->dev = huge_encode_dev(disk_devt(disk));
 nl->next = 0;
 strcpy(nl->name, hc->name);

@@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 if (dm_suspended(md))
 param->flags |= DM_SUSPEND_FLAG;

-param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+param->dev = huge_encode_dev(disk_devt(disk));

 /*
 * Yes, this will be out of date by the time it gets back
@@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 */
 param->open_count = dm_open_count(md);

-if (disk->policy)
+if (get_disk_ro(disk))
 param->flags |= DM_READONLY_FLAG;

 param->event_nr = dm_get_event_nr(md);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b2ab8489d0e..103304c1e3b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -34,6 +34,7 @@ struct pgpath {
 unsigned fail_count; /* Cumulative failure count */

 struct dm_path path;
+struct work_struct deactivate_path;
 };

 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -113,6 +114,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);


 /*-----------------------------------------------
@@ -123,8 +125,10 @@ static struct pgpath *alloc_pgpath(void)
 {
 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

-if (pgpath)
+if (pgpath) {
 pgpath->is_active = 1;
+INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+}

 return pgpath;
 }
@@ -134,6 +138,14 @@ static void free_pgpath(struct pgpath *pgpath)
 kfree(pgpath);
 }

+static void deactivate_path(struct work_struct *work)
+{
+struct pgpath *pgpath =
+container_of(work, struct pgpath, deactivate_path);
+
+blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
 static struct priority_group *alloc_priority_group(void)
 {
 struct priority_group *pg;
@@ -873,6 +885,7 @@ static int fail_path(struct pgpath *pgpath)
 pgpath->path.dev->name, m->nr_valid_paths);

 queue_work(kmultipathd, &m->trigger_event);
+queue_work(kmultipathd, &pgpath->deactivate_path);

 out:
 spin_unlock_irqrestore(&m->lock, flags);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4de90ab3968..b745d8ac625 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,

 memset(major_minor, 0, sizeof(major_minor));
 sprintf(major_minor, "%d:%d",
-bio->bi_bdev->bd_disk->major,
-bio->bi_bdev->bd_disk->first_minor);
+MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
+MINOR(disk_devt(bio->bi_bdev->bd_disk)));

 /*
 * Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ace998ce59f..327de03a5bd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 static void start_io_acct(struct dm_io *io)
 {
 struct mapped_device *md = io->md;
+int cpu;

 io->start_time = jiffies;

-preempt_disable();
-disk_round_stats(dm_disk(md));
-preempt_enable();
-dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+cpu = part_stat_lock();
+part_round_stats(cpu, &dm_disk(md)->part0);
+part_stat_unlock();
+dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }

 static int end_io_acct(struct dm_io *io)
@@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io)
 struct mapped_device *md = io->md;
 struct bio *bio = io->bio;
 unsigned long duration = jiffies - io->start_time;
-int pending;
+int pending, cpu;
 int rw = bio_data_dir(bio);

-preempt_disable();
-disk_round_stats(dm_disk(md));
-preempt_enable();
-dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+cpu = part_stat_lock();
+part_round_stats(cpu, &dm_disk(md)->part0);
+part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+part_stat_unlock();

-disk_stat_add(dm_disk(md), ticks[rw], duration);
+dm_disk(md)->part0.in_flight = pending =
+atomic_dec_return(&md->pending);

 return !pending;
 }
@@ -885,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 int r = -EIO;
 int rw = bio_data_dir(bio);
 struct mapped_device *md = q->queuedata;
+int cpu;

 /*
 * There is no use in forwarding any barrier request since we can't
@@ -897,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)

 down_read(&md->io_lock);

-disk_stat_inc(dm_disk(md), ios[rw]);
-disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+part_stat_unlock();

 /*
 * If we're suspended we have to queue
@@ -1146,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);

 static void free_dev(struct mapped_device *md)
 {
-int minor = md->disk->first_minor;
+int minor = MINOR(disk_devt(md->disk));

 if (md->suspended_bdev) {
 unlock_fs(md);
@@ -1182,7 +1187,7 @@ static void event_callback(void *context)
 list_splice_init(&md->uevent_list, &uevents);
 spin_unlock_irqrestore(&md->uevent_lock, flags);

-dm_send_uevents(&uevents, &md->disk->dev.kobj);
+dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

 atomic_inc(&md->event_nr);
 wake_up(&md->eventq);
@@ -1267,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)

 md = idr_find(&_minor_idr, minor);
 if (md && (md == MINOR_ALLOCED ||
-(dm_disk(md)->first_minor != minor) ||
+(MINOR(disk_devt(dm_disk(md))) != minor) ||
 test_bit(DMF_FREEING, &md->flags))) {
 md = NULL;
 goto out;
@@ -1318,7 +1323,8 @@ void dm_put(struct mapped_device *md)

 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
 map = dm_get_table(md);
-idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+idr_replace(&_minor_idr, MINOR_ALLOCED,
+MINOR(disk_devt(dm_disk(md))));
 set_bit(DMF_FREEING, &md->flags);
 spin_unlock(&_minor_lock);
 if (!dm_suspended(md)) {
@@ -1638,7 +1644,7 @@ out:
 *---------------------------------------------------------------*/
 void dm_kobject_uevent(struct mapped_device *md)
 {
-kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
 }

 uint32_t dm_next_uevent_seq(struct mapped_device *md)
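The same conversion repeats in each md personality below: the old disk-wide disk_stat_* updates under preempt_disable() become per-CPU, per-partition statistics taken against the whole-disk part0 member under part_stat_lock()/part_stat_unlock(). As a minimal standalone sketch of that pattern, using only the block-layer calls visible in this patch and targeting the kernel version it applies to (the helper name and its arguments are hypothetical):

#include <linux/genhd.h>
#include <linux/bio.h>

/* Hypothetical helper: account one bio against the whole-disk statistics. */
static void sketch_account_bio(struct gendisk *disk, struct bio *bio)
{
	const int rw = bio_data_dir(bio);	/* 0 = read, 1 = write */
	int cpu;

	cpu = part_stat_lock();	/* pins the current CPU and returns its id */
	part_stat_inc(cpu, &disk->part0, ios[rw]);	/* one more request */
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();
}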
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b1eebf88c20..b9cbee688fa 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -318,14 +318,18 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 mddev_t *mddev = q->queuedata;
 dev_info_t *tmp_dev;
 sector_t block;
+int cpu;

 if (unlikely(bio_barrier(bio))) {
 bio_endio(bio, -EOPNOTSUPP);
 return 0;
 }

-disk_stat_inc(mddev->gendisk, ios[rw]);
-disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+bio_sectors(bio));
+part_stat_unlock();

 tmp_dev = which_dev(mddev, bio->bi_sector);
 block = bio->bi_sector >> 1;
@@ -349,7 +353,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 * split it.
 */
 struct bio_pair *bp;
-bp = bio_split(bio, bio_split_pool,
+bp = bio_split(bio,
 ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
 if (linear_make_request(q, &bp->bio1))
 generic_make_request(&bp->bio1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index deeac4b4417..0a3a4bdcd4a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1464,10 +1464,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
 goto fail;

-if (rdev->bdev->bd_part)
-ko = &rdev->bdev->bd_part->dev.kobj;
-else
-ko = &rdev->bdev->bd_disk->dev.kobj;
+ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
 kobject_del(&rdev->kobj);
 goto fail;
@@ -3470,8 +3467,8 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 disk->queue = mddev->queue;
 add_disk(disk);
 mddev->gendisk = disk;
-error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
-"%s", "md");
+error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+&disk_to_dev(disk)->kobj, "%s", "md");
 mutex_unlock(&disks_mutex);
 if (error)
 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
@@ -3761,7 +3758,7 @@ static int do_md_run(mddev_t * mddev)
 sysfs_notify(&mddev->kobj, NULL, "array_state");
 sysfs_notify(&mddev->kobj, NULL, "sync_action");
 sysfs_notify(&mddev->kobj, NULL, "degraded");
-kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
+kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 return 0;
 }

@@ -5549,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
 rcu_read_lock();
 rdev_for_each_rcu(rdev, mddev) {
 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-curr_events = disk_stat_read(disk, sectors[0]) +
-disk_stat_read(disk, sectors[1]) -
+curr_events = part_stat_read(&disk->part0, sectors[0]) +
+part_stat_read(&disk->part0, sectors[1]) -
 atomic_read(&disk->sync_io);
 /* sync IO will cause sync_io to increase before the disk_stats
 * as sync_io is counted when a request starts, and
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c4779ccba1c..8bb8794129b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -147,6 +147,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 struct multipath_bh * mp_bh;
 struct multipath_info *multipath;
 const int rw = bio_data_dir(bio);
+int cpu;

 if (unlikely(bio_barrier(bio))) {
 bio_endio(bio, -EOPNOTSUPP);
@@ -158,8 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 mp_bh->master_bio = bio;
 mp_bh->mddev = mddev;

-disk_stat_inc(mddev->gendisk, ios[rw]);
-disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+bio_sectors(bio));
+part_stat_unlock();

 mp_bh->path = multipath_map(conf);
 if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 18361063566..53508a8a981 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,14 +399,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 sector_t chunk;
 sector_t block, rsect;
 const int rw = bio_data_dir(bio);
+int cpu;

 if (unlikely(bio_barrier(bio))) {
 bio_endio(bio, -EOPNOTSUPP);
 return 0;
 }

-disk_stat_inc(mddev->gendisk, ios[rw]);
-disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+bio_sectors(bio));
+part_stat_unlock();

 chunk_size = mddev->chunk_size >> 10;
 chunk_sects = mddev->chunk_size >> 9;
@@ -423,7 +427,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
 /* This is a one page bio that upper layers
 * refuse to split for us, so we need to split it.
 */
-bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
 if (raid0_make_request(q, &bp->bio1))
 generic_make_request(&bp->bio1);
 if (raid0_make_request(q, &bp->bio2))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 03a5ab705c2..b9764429d85 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -779,7 +779,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 struct page **behind_pages = NULL;
 const int rw = bio_data_dir(bio);
 const int do_sync = bio_sync(bio);
-int do_barriers;
+int cpu, do_barriers;
 mdk_rdev_t *blocked_rdev;

 /*
@@ -804,8 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)

 bitmap = mddev->bitmap;

-disk_stat_inc(mddev->gendisk, ios[rw]);
-disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+bio_sectors(bio));
+part_stat_unlock();

 /*
 * make_request() can abort the operation when READA is being
@@ -1302,9 +1305,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 sbio->bi_size = r1_bio->sectors << 9;
 sbio->bi_idx = 0;
 sbio->bi_phys_segments = 0;
-sbio->bi_hw_segments = 0;
-sbio->bi_hw_front_size = 0;
-sbio->bi_hw_back_size = 0;
 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
 sbio->bi_flags |= 1 << BIO_UPTODATE;
 sbio->bi_next = NULL;
@@ -1790,7 +1790,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 bio->bi_vcnt = 0;
 bio->bi_idx = 0;
 bio->bi_phys_segments = 0;
-bio->bi_hw_segments = 0;
 bio->bi_size = 0;
 bio->bi_end_io = NULL;
 bio->bi_private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e34cd0e6247..8bdc9bfc288 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -789,6 +789,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 mirror_info_t *mirror;
 r10bio_t *r10_bio;
 struct bio *read_bio;
+int cpu;
 int i;
 int chunk_sects = conf->chunk_mask + 1;
 const int rw = bio_data_dir(bio);
@@ -816,7 +817,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 /* This is a one page bio that upper layers
 * refuse to split for us, so we need to split it.
 */
-bp = bio_split(bio, bio_split_pool,
+bp = bio_split(bio,
 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
 if (make_request(q, &bp->bio1))
 generic_make_request(&bp->bio1);
@@ -843,8 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
 */
 wait_barrier(conf);

-disk_stat_inc(mddev->gendisk, ios[rw]);
-disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+bio_sectors(bio));
+part_stat_unlock();

 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

@@ -1345,9 +1349,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 tbio->bi_size = r10_bio->sectors << 9;
 tbio->bi_idx = 0;
 tbio->bi_phys_segments = 0;
-tbio->bi_hw_segments = 0;
-tbio->bi_hw_front_size = 0;
-tbio->bi_hw_back_size = 0;
 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
 tbio->bi_flags |= 1 << BIO_UPTODATE;
 tbio->bi_next = NULL;
@@ -1947,7 +1948,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 bio->bi_vcnt = 0;
 bio->bi_idx = 0;
 bio->bi_phys_segments = 0;
-bio->bi_hw_segments = 0;
 bio->bi_size = 0;
 }

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 224de022e7c..ae16794bef2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -101,6 +101,40 @@
 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
 #endif

+/*
+* We maintain a biased count of active stripes in the bottom 16 bits of
+* bi_phys_segments, and a count of processed stripes in the upper 16 bits
+*/
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+return bio->bi_phys_segments & 0xffff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+return (bio->bi_phys_segments >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+--bio->bi_phys_segments;
+return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+unsigned short val = raid5_bi_hw_segments(bio);
+
+--val;
+bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
+return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
+}
+
 static inline int raid6_next_disk(int disk, int raid_disks)
 {
 disk++;
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 while (rbi && rbi->bi_sector <
 dev->sector + STRIPE_SECTORS) {
 rbi2 = r5_next_bio(rbi, dev->sector);
-if (--rbi->bi_phys_segments == 0) {
+if (!raid5_dec_bi_phys_segments(rbi)) {
 rbi->bi_next = return_bi;
 return_bi = rbi;
 }
@@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 if (*bip)
 bi->bi_next = *bip;
 *bip = bi;
-bi->bi_phys_segments ++;
+bi->bi_phys_segments++;
 spin_unlock_irq(&conf->device_lock);
 spin_unlock(&sh->lock);

@@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
 sh->dev[i].sector + STRIPE_SECTORS) {
 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 clear_bit(BIO_UPTODATE, &bi->bi_flags);
-if (--bi->bi_phys_segments == 0) {
+if (!raid5_dec_bi_phys_segments(bi)) {
 md_write_end(conf->mddev);
 bi->bi_next = *return_bi;
 *return_bi = bi;
@@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
 sh->dev[i].sector + STRIPE_SECTORS) {
 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 clear_bit(BIO_UPTODATE, &bi->bi_flags);
-if (--bi->bi_phys_segments == 0) {
+if (!raid5_dec_bi_phys_segments(bi)) {
 md_write_end(conf->mddev);
 bi->bi_next = *return_bi;
 *return_bi = bi;
@@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
 struct bio *nextbi =
 r5_next_bio(bi, sh->dev[i].sector);
 clear_bit(BIO_UPTODATE, &bi->bi_flags);
-if (--bi->bi_phys_segments == 0) {
+if (!raid5_dec_bi_phys_segments(bi)) {
 bi->bi_next = *return_bi;
 *return_bi = bi;
 }
@@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
 while (wbi && wbi->bi_sector <
 dev->sector + STRIPE_SECTORS) {
 wbi2 = r5_next_bio(wbi, dev->sector);
-if (--wbi->bi_phys_segments == 0) {
+if (!raid5_dec_bi_phys_segments(wbi)) {
 md_write_end(conf->mddev);
 wbi->bi_next = *return_bi;
 *return_bi = wbi;
@@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 copy_data(0, rbi, dev->page, dev->sector);
 rbi2 = r5_next_bio(rbi, dev->sector);
 spin_lock_irq(&conf->device_lock);
-if (--rbi->bi_phys_segments == 0) {
+if (!raid5_dec_bi_phys_segments(rbi)) {
 rbi->bi_next = return_bi;
 return_bi = rbi;
 }
@@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
 if(bi) {
 conf->retry_read_aligned_list = bi->bi_next;
 bi->bi_next = NULL;
+/*
+* this sets the active strip count to 1 and the processed
+* strip count to zero (upper 8 bits)
+*/
 bi->bi_phys_segments = 1; /* biased count of active stripes */
-bi->bi_hw_segments = 0; /* count of processed stripes */
 }

 return bi;
@@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
 if ((bi->bi_size>>9) > q->max_sectors)
 return 0;
 blk_recount_segments(q, bi);
-if (bi->bi_phys_segments > q->max_phys_segments ||
-bi->bi_hw_segments > q->max_hw_segments)
+if (bi->bi_phys_segments > q->max_phys_segments)
 return 0;

 if (q->merge_bvec_fn)
@@ -3351,7 +3387,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 sector_t logical_sector, last_sector;
 struct stripe_head *sh;
 const int rw = bio_data_dir(bi);
-int remaining;
+int cpu, remaining;

 if (unlikely(bio_barrier(bi))) {
 bio_endio(bi, -EOPNOTSUPP);
@@ -3360,8 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)

 md_write_start(mddev, bi);

-disk_stat_inc(mddev->gendisk, ios[rw]);
-disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
+cpu = part_stat_lock();
+part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+bio_sectors(bi));
+part_stat_unlock();

 if (rw == READ &&
 mddev->reshape_position == MaxSector &&
@@ -3468,7 +3507,7 @@ static int make_request(struct request_queue *q, struct bio * bi)

 }
 spin_lock_irq(&conf->device_lock);
-remaining = --bi->bi_phys_segments;
+remaining = raid5_dec_bi_phys_segments(bi);
 spin_unlock_irq(&conf->device_lock);
 if (remaining == 0) {

@@ -3752,7 +3791,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 sector += STRIPE_SECTORS,
 scnt++) {

-if (scnt < raid_bio->bi_hw_segments)
+if (scnt < raid5_bi_hw_segments(raid_bio))
 /* already done this stripe */
 continue;

@@ -3760,7 +3799,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)

 if (!sh) {
 /* failed to get a stripe - must wait */
-raid_bio->bi_hw_segments = scnt;
+raid5_set_bi_hw_segments(raid_bio, scnt);
 conf->retry_read_aligned = raid_bio;
 return handled;
 }
@@ -3768,7 +3807,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
 release_stripe(sh);
-raid_bio->bi_hw_segments = scnt;
+raid5_set_bi_hw_segments(raid_bio, scnt);
 conf->retry_read_aligned = raid_bio;
 return handled;
 }
@@ -3778,7 +3817,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 handled++;
 }
 spin_lock_irq(&conf->device_lock);
-remaining = --raid_bio->bi_phys_segments;
+remaining = raid5_dec_bi_phys_segments(raid_bio);
 spin_unlock_irq(&conf->device_lock);
 if (remaining == 0)
 bio_endio(raid_bio, 0);
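The raid5 hunks above all follow from one change: with bi_hw_segments gone from struct bio, the driver now keeps both of its counters in the single 32-bit bi_phys_segments field, the biased active-stripe count in the low 16 bits and the processed-stripe count in the high 16 bits. A small user-space illustration of that packing scheme, with hypothetical names and a plain unsigned int standing in for the bio field:

#include <stdio.h>

/* Low 16 bits: active count; high 16 bits: processed count. */
static unsigned int pack(unsigned int active, unsigned int processed)
{
	return (processed << 16) | (active & 0xffff);
}

static unsigned int active_part(unsigned int v)
{
	return v & 0xffff;
}

static unsigned int processed_part(unsigned int v)
{
	return (v >> 16) & 0xffff;
}

int main(void)
{
	unsigned int v = pack(1, 0);	/* biased active count of 1, nothing processed yet */

	v = pack(active_part(v) + 3, processed_part(v));	/* three stripes attached */
	v = pack(active_part(v), processed_part(v) + 1);	/* one stripe processed */
	printf("active=%u processed=%u\n", active_part(v), processed_part(v));
	return 0;
}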