author    Linus Torvalds <torvalds@linux-foundation.org>    2011-04-05 18:29:01 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-04-05 18:29:01 -0400
commit    44148a667d3715f3a1c37eeff7e954c946cc1efe (patch)
tree      fd8685dd17dcef45993b733f4ec4df97e6808304 /drivers
parent    d0de4dc584ec6aa3b26fffea320a8457827768fc (diff)
parent    782b86e2656762382ae1c2686d8d5c91f7d5eacf (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
  ide: always ensure that blk_delay_queue() is called if we have pending IO
  block: fix request sorting at unplug
  dm: improve block integrity support
  fs: export empty_aops
  ide: ide_requeue_and_plug() reinstate "always plug" behaviour
  blk-throttle: don't call xchg on bool
  ufs: remove unessecary blk_flush_plug
  block: make the flush insertion use the tail of the dispatch list
  block: get rid of elv_insert() interface
  block: dump request state on seeing a corrupted request completion
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ide/ide-io.c    43
-rw-r--r--  drivers/md/dm-table.c  114
2 files changed, 101 insertions, 56 deletions
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0e406d73b2c8..177db6d5b2f5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -430,6 +430,26 @@ static inline void ide_unlock_host(struct ide_host *host)
 	}
 }
 
+static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
+{
+	if (rq)
+		blk_requeue_request(q, rq);
+	if (rq || blk_peek_request(q)) {
+		/* Use 3ms as that was the old plug delay */
+		blk_delay_queue(q, 3);
+	}
+}
+
+void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
+{
+	struct request_queue *q = drive->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__ide_requeue_and_plug(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 /*
  * Issue a new request to a device.
  */
@@ -550,28 +570,7 @@ plug_device:
 	ide_unlock_host(host);
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
-
-	if (rq) {
-		blk_requeue_request(q, rq);
-		blk_delay_queue(q, queue_run_ms);
-	}
-}
-
-void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
-{
-	struct request_queue *q = drive->queue;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	if (rq)
-		blk_requeue_request(q, rq);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Use 3ms as that was the old plug delay */
-	if (rq)
-		blk_delay_queue(q, 3);
+	__ide_requeue_and_plug(q, rq);
 }
 
 static int drive_is_ready(ide_drive_t *drive)
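
Taken together, the two ide-io.c hunks make the request-handling path that ends at the plug_device_2 label and the exported ide_requeue_and_plug() share one helper, __ide_requeue_and_plug(), which expects q->queue_lock to already be held. A minimal sketch of how an outside caller would use the locking wrapper; the surrounding function here is hypothetical and only for illustration:

/* Hypothetical caller: hand a failed request back to the block layer.
 * ide_requeue_and_plug() takes q->queue_lock itself, so it must be
 * called without the lock held; with rq == NULL it still delays the
 * queue by 3ms when blk_peek_request() reports pending requests.
 */
static void example_error_recovery(ide_drive_t *drive, struct request *rq)
{
	ide_requeue_and_plug(drive, rq);
}
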
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 416d4e258df6..cb8380c9767f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -927,20 +927,80 @@ static int dm_table_build_index(struct dm_table *t)
 }
 
 /*
+ * Get a disk whose integrity profile reflects the table's profile.
+ * If %match_all is true, all devices' profiles must match.
+ * If %match_all is false, all devices must at least have an
+ * allocated integrity profile; but uninitialized is ok.
+ * Returns NULL if integrity support was inconsistent or unavailable.
+ */
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
+						    bool match_all)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd = NULL;
+	struct gendisk *prev_disk = NULL, *template_disk = NULL;
+
+	list_for_each_entry(dd, devices, list) {
+		template_disk = dd->dm_dev.bdev->bd_disk;
+		if (!blk_get_integrity(template_disk))
+			goto no_integrity;
+		if (!match_all && !blk_integrity_is_initialized(template_disk))
+			continue; /* skip uninitialized profiles */
+		else if (prev_disk &&
+			 blk_integrity_compare(prev_disk, template_disk) < 0)
+			goto no_integrity;
+		prev_disk = template_disk;
+	}
+
+	return template_disk;
+
+no_integrity:
+	if (prev_disk)
+		DMWARN("%s: integrity not set: %s and %s profile mismatch",
+		       dm_device_name(t->md),
+		       prev_disk->disk_name,
+		       template_disk->disk_name);
+	return NULL;
+}
+
+/*
  * Register the mapped device for blk_integrity support if
- * the underlying devices support it.
+ * the underlying devices have an integrity profile. But all devices
+ * may not have matching profiles (checking all devices isn't reliable
+ * during table load because this table may use other DM device(s) which
+ * must be resumed before they will have an initialized integity profile).
+ * Stacked DM devices force a 2 stage integrity profile validation:
+ * 1 - during load, validate all initialized integrity profiles match
+ * 2 - during resume, validate all integrity profiles match
  */
 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *dd;
+	struct gendisk *template_disk = NULL;
 
-	list_for_each_entry(dd, devices, list)
-		if (bdev_get_integrity(dd->dm_dev.bdev)) {
-			t->integrity_supported = 1;
-			return blk_integrity_register(dm_disk(md), NULL);
-		}
+	template_disk = dm_table_get_integrity_disk(t, false);
+	if (!template_disk)
+		return 0;
 
+	if (!blk_integrity_is_initialized(dm_disk(md))) {
+		t->integrity_supported = 1;
+		return blk_integrity_register(dm_disk(md), NULL);
+	}
+
+	/*
+	 * If DM device already has an initalized integrity
+	 * profile the new profile should not conflict.
+	 */
+	if (blk_integrity_is_initialized(template_disk) &&
+	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+		DMWARN("%s: conflict with existing integrity profile: "
+		       "%s profile mismatch",
+		       dm_device_name(t->md),
+		       template_disk->disk_name);
+		return 1;
+	}
+
+	/* Preserve existing initialized integrity profile */
+	t->integrity_supported = 1;
 	return 0;
 }
 
@@ -1094,41 +1154,27 @@ combine_limits:
 
 /*
  * Set the integrity profile for this device if all devices used have
- * matching profiles.
+ * matching profiles. We're quite deep in the resume path but still
+ * don't know if all devices (particularly DM devices this device
+ * may be stacked on) have matching profiles. Even if the profiles
+ * don't match we have no way to fail (to resume) at this point.
  */
 static void dm_table_set_integrity(struct dm_table *t)
 {
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_dev_internal *prev = NULL, *dd = NULL;
+	struct gendisk *template_disk = NULL;
 
 	if (!blk_get_integrity(dm_disk(t->md)))
 		return;
 
-	list_for_each_entry(dd, devices, list) {
-		if (prev &&
-		    blk_integrity_compare(prev->dm_dev.bdev->bd_disk,
-					  dd->dm_dev.bdev->bd_disk) < 0) {
-			DMWARN("%s: integrity not set: %s and %s mismatch",
-			       dm_device_name(t->md),
-			       prev->dm_dev.bdev->bd_disk->disk_name,
-			       dd->dm_dev.bdev->bd_disk->disk_name);
-			goto no_integrity;
-		}
-		prev = dd;
+	template_disk = dm_table_get_integrity_disk(t, true);
+	if (!template_disk &&
+	    blk_integrity_is_initialized(dm_disk(t->md))) {
+		DMWARN("%s: device no longer has a valid integrity profile",
+		       dm_device_name(t->md));
+		return;
 	}
-
-	if (!prev || !bdev_get_integrity(prev->dm_dev.bdev))
-		goto no_integrity;
-
 	blk_integrity_register(dm_disk(t->md),
-			       bdev_get_integrity(prev->dm_dev.bdev));
-
-	return;
-
-no_integrity:
-	blk_integrity_register(dm_disk(t->md), NULL);
-
-	return;
+			       blk_get_integrity(template_disk));
 }
 
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
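
The dm-table.c changes implement the two-stage validation described in the new comment block: at table load, dm_table_prealloc_integrity() asks dm_table_get_integrity_disk() for a template disk with match_all = false, so stacked DM devices whose profiles are not yet initialized are tolerated; at resume, dm_table_set_integrity() passes match_all = true and every device must have a matching, initialized profile. A condensed sketch of that flow; example_load() and example_resume() are hypothetical wrappers that drop the conflict handling the real functions perform:

/* Sketch only: how the two stages drive dm_table_get_integrity_disk(). */
static int example_load(struct dm_table *t, struct mapped_device *md)
{
	/* stage 1: tolerate uninitialized profiles on stacked DM devices */
	struct gendisk *disk = dm_table_get_integrity_disk(t, false);

	if (!disk)
		return 0;	/* no integrity support for this table */
	t->integrity_supported = 1;
	return blk_integrity_register(dm_disk(md), NULL);
}

static void example_resume(struct dm_table *t)
{
	/* stage 2: every device must now have a matching, initialized profile */
	struct gendisk *disk = dm_table_get_integrity_disk(t, true);

	if (disk)
		blk_integrity_register(dm_disk(t->md),
				       blk_get_integrity(disk));
}
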