diff options
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 52 |
1 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bca448e11878..327de03a5bdf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio) | |||
377 | static void start_io_acct(struct dm_io *io) | 377 | static void start_io_acct(struct dm_io *io) |
378 | { | 378 | { |
379 | struct mapped_device *md = io->md; | 379 | struct mapped_device *md = io->md; |
380 | int cpu; | ||
380 | 381 | ||
381 | io->start_time = jiffies; | 382 | io->start_time = jiffies; |
382 | 383 | ||
383 | preempt_disable(); | 384 | cpu = part_stat_lock(); |
384 | disk_round_stats(dm_disk(md)); | 385 | part_round_stats(cpu, &dm_disk(md)->part0); |
385 | preempt_enable(); | 386 | part_stat_unlock(); |
386 | dm_disk(md)->in_flight = atomic_inc_return(&md->pending); | 387 | dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); |
387 | } | 388 | } |
388 | 389 | ||
389 | static int end_io_acct(struct dm_io *io) | 390 | static int end_io_acct(struct dm_io *io) |
@@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io) | |||
391 | struct mapped_device *md = io->md; | 392 | struct mapped_device *md = io->md; |
392 | struct bio *bio = io->bio; | 393 | struct bio *bio = io->bio; |
393 | unsigned long duration = jiffies - io->start_time; | 394 | unsigned long duration = jiffies - io->start_time; |
394 | int pending; | 395 | int pending, cpu; |
395 | int rw = bio_data_dir(bio); | 396 | int rw = bio_data_dir(bio); |
396 | 397 | ||
397 | preempt_disable(); | 398 | cpu = part_stat_lock(); |
398 | disk_round_stats(dm_disk(md)); | 399 | part_round_stats(cpu, &dm_disk(md)->part0); |
399 | preempt_enable(); | 400 | part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); |
400 | dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending); | 401 | part_stat_unlock(); |
401 | 402 | ||
402 | disk_stat_add(dm_disk(md), ticks[rw], duration); | 403 | dm_disk(md)->part0.in_flight = pending = |
404 | atomic_dec_return(&md->pending); | ||
403 | 405 | ||
404 | return !pending; | 406 | return !pending; |
405 | } | 407 | } |
@@ -837,12 +839,14 @@ static int dm_merge_bvec(struct request_queue *q, | |||
837 | struct dm_table *map = dm_get_table(md); | 839 | struct dm_table *map = dm_get_table(md); |
838 | struct dm_target *ti; | 840 | struct dm_target *ti; |
839 | sector_t max_sectors; | 841 | sector_t max_sectors; |
840 | int max_size; | 842 | int max_size = 0; |
841 | 843 | ||
842 | if (unlikely(!map)) | 844 | if (unlikely(!map)) |
843 | return 0; | 845 | goto out; |
844 | 846 | ||
845 | ti = dm_table_find_target(map, bvm->bi_sector); | 847 | ti = dm_table_find_target(map, bvm->bi_sector); |
848 | if (!dm_target_is_valid(ti)) | ||
849 | goto out_table; | ||
846 | 850 | ||
847 | /* | 851 | /* |
848 | * Find maximum amount of I/O that won't need splitting | 852 | * Find maximum amount of I/O that won't need splitting |
@@ -861,14 +865,16 @@ static int dm_merge_bvec(struct request_queue *q, | |||
861 | if (max_size && ti->type->merge) | 865 | if (max_size && ti->type->merge) |
862 | max_size = ti->type->merge(ti, bvm, biovec, max_size); | 866 | max_size = ti->type->merge(ti, bvm, biovec, max_size); |
863 | 867 | ||
868 | out_table: | ||
869 | dm_table_put(map); | ||
870 | |||
871 | out: | ||
864 | /* | 872 | /* |
865 | * Always allow an entire first page | 873 | * Always allow an entire first page |
866 | */ | 874 | */ |
867 | if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) | 875 | if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) |
868 | max_size = biovec->bv_len; | 876 | max_size = biovec->bv_len; |
869 | 877 | ||
870 | dm_table_put(map); | ||
871 | |||
872 | return max_size; | 878 | return max_size; |
873 | } | 879 | } |
874 | 880 | ||
@@ -881,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio) | |||
881 | int r = -EIO; | 887 | int r = -EIO; |
882 | int rw = bio_data_dir(bio); | 888 | int rw = bio_data_dir(bio); |
883 | struct mapped_device *md = q->queuedata; | 889 | struct mapped_device *md = q->queuedata; |
890 | int cpu; | ||
884 | 891 | ||
885 | /* | 892 | /* |
886 | * There is no use in forwarding any barrier request since we can't | 893 | * There is no use in forwarding any barrier request since we can't |
@@ -893,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio) | |||
893 | 900 | ||
894 | down_read(&md->io_lock); | 901 | down_read(&md->io_lock); |
895 | 902 | ||
896 | disk_stat_inc(dm_disk(md), ios[rw]); | 903 | cpu = part_stat_lock(); |
897 | disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio)); | 904 | part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]); |
905 | part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); | ||
906 | part_stat_unlock(); | ||
898 | 907 | ||
899 | /* | 908 | /* |
900 | * If we're suspended we have to queue | 909 | * If we're suspended we have to queue |
@@ -1142,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md); | |||
1142 | 1151 | ||
1143 | static void free_dev(struct mapped_device *md) | 1152 | static void free_dev(struct mapped_device *md) |
1144 | { | 1153 | { |
1145 | int minor = md->disk->first_minor; | 1154 | int minor = MINOR(disk_devt(md->disk)); |
1146 | 1155 | ||
1147 | if (md->suspended_bdev) { | 1156 | if (md->suspended_bdev) { |
1148 | unlock_fs(md); | 1157 | unlock_fs(md); |
@@ -1178,7 +1187,7 @@ static void event_callback(void *context) | |||
1178 | list_splice_init(&md->uevent_list, &uevents); | 1187 | list_splice_init(&md->uevent_list, &uevents); |
1179 | spin_unlock_irqrestore(&md->uevent_lock, flags); | 1188 | spin_unlock_irqrestore(&md->uevent_lock, flags); |
1180 | 1189 | ||
1181 | dm_send_uevents(&uevents, &md->disk->dev.kobj); | 1190 | dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); |
1182 | 1191 | ||
1183 | atomic_inc(&md->event_nr); | 1192 | atomic_inc(&md->event_nr); |
1184 | wake_up(&md->eventq); | 1193 | wake_up(&md->eventq); |
@@ -1263,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev) | |||
1263 | 1272 | ||
1264 | md = idr_find(&_minor_idr, minor); | 1273 | md = idr_find(&_minor_idr, minor); |
1265 | if (md && (md == MINOR_ALLOCED || | 1274 | if (md && (md == MINOR_ALLOCED || |
1266 | (dm_disk(md)->first_minor != minor) || | 1275 | (MINOR(disk_devt(dm_disk(md))) != minor) || |
1267 | test_bit(DMF_FREEING, &md->flags))) { | 1276 | test_bit(DMF_FREEING, &md->flags))) { |
1268 | md = NULL; | 1277 | md = NULL; |
1269 | goto out; | 1278 | goto out; |
@@ -1314,7 +1323,8 @@ void dm_put(struct mapped_device *md) | |||
1314 | 1323 | ||
1315 | if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { | 1324 | if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { |
1316 | map = dm_get_table(md); | 1325 | map = dm_get_table(md); |
1317 | idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor); | 1326 | idr_replace(&_minor_idr, MINOR_ALLOCED, |
1327 | MINOR(disk_devt(dm_disk(md)))); | ||
1318 | set_bit(DMF_FREEING, &md->flags); | 1328 | set_bit(DMF_FREEING, &md->flags); |
1319 | spin_unlock(&_minor_lock); | 1329 | spin_unlock(&_minor_lock); |
1320 | if (!dm_suspended(md)) { | 1330 | if (!dm_suspended(md)) { |
@@ -1634,7 +1644,7 @@ out: | |||
1634 | *---------------------------------------------------------------*/ | 1644 | *---------------------------------------------------------------*/ |
1635 | void dm_kobject_uevent(struct mapped_device *md) | 1645 | void dm_kobject_uevent(struct mapped_device *md) |
1636 | { | 1646 | { |
1637 | kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE); | 1647 | kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE); |
1638 | } | 1648 | } |
1639 | 1649 | ||
1640 | uint32_t dm_next_uevent_seq(struct mapped_device *md) | 1650 | uint32_t dm_next_uevent_seq(struct mapped_device *md) |