author:    Mike Snitzer <snitzer@redhat.com>       2011-08-02 07:32:08 -0400
committer: Alasdair G Kergon <agk@redhat.com>      2011-08-02 07:32:08 -0400
commit:    ed8b752bccf2560e305e25125721d2f0ac759e88
tree:      b909fcf21ca7cdda3e7a680b37162212cce99586
parent:    772ae5f54d69c38a5e3c4352c5fdbdaff141af21
dm table: set flush capability based on underlying devices
DM has always advertised both REQ_FLUSH and REQ_FUA flush capabilities
regardless of whether or not a given DM device's underlying devices
also advertised a need for them.

Block's flush-merge changes from 2.6.39 have proven to be more costly
for DM devices.  Performance regressions have been reported even when
DM's underlying devices do not advertise that they have a write cache.

Fix the performance regressions by configuring a DM device's flushing
capabilities based on the capabilities of its underlying devices.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
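[Editorial note: the rule this patch introduces can be modeled in ordinary
user-space C. The sketch below is a hypothetical stand-in, not kernel code:
struct fake_device, the flag values, and the direct loop replace DM's real
iterate_devices walk over a table's targets. Only the propagation rule
mirrors the patch: advertise REQ_FLUSH when at least one underlying device
supports it, and probe REQ_FUA only once REQ_FLUSH is known to be set.]

/* Minimal user-space model of the patch's flush-capability propagation.
 * The types and flag values are illustrative stand-ins, not kernel APIs. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define REQ_FLUSH (1u << 0)	/* stand-in for the kernel's REQ_FLUSH */
#define REQ_FUA   (1u << 1)	/* stand-in for the kernel's REQ_FUA */

struct fake_device {
	unsigned flush_flags;	/* what this underlying device advertises */
};

/* Mirrors the shape of dm_table_supports_flush(): true if at least one
 * underlying device advertises the requested flush capability. */
static bool table_supports_flush(const struct fake_device *devs, size_t n,
				 unsigned flush)
{
	for (size_t i = 0; i < n; i++)
		if (devs[i].flush_flags & flush)
			return true;
	return false;
}

int main(void)
{
	struct fake_device devs[] = {
		{ .flush_flags = 0 },			/* no write cache */
		{ .flush_flags = REQ_FLUSH | REQ_FUA },	/* volatile write cache */
	};
	size_t n = sizeof(devs) / sizeof(devs[0]);
	unsigned flush = 0;

	/* Same shape as the dm_table_set_restrictions() hunk below:
	 * REQ_FUA is only probed after REQ_FLUSH is known supported. */
	if (table_supports_flush(devs, n, REQ_FLUSH)) {
		flush |= REQ_FLUSH;
		if (table_supports_flush(devs, n, REQ_FUA))
			flush |= REQ_FUA;
	}

	printf("advertised flush flags: 0x%x\n", flush);	/* 0x3 here */
	return 0;
}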
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-table.c  43
-rw-r--r--  drivers/md/dm.c         1
2 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 259ce99302fc..986b8754bb08 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1248,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t)
 			       blk_get_integrity(template_disk));
 }
 
+static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	unsigned flush = (*(unsigned *)data);
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && (q->flush_flags & flush);
+}
+
+static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	/*
+	 * Require at least one underlying device to support flushes.
+	 * t->devices includes internal dm devices such as mirror logs
+	 * so we need to use iterate_devices here, which targets
+	 * supporting flushes must provide.
+	 */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!ti->num_flush_requests)
+			continue;
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
+			return 1;
+	}
+
+	return 0;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
+	unsigned flush = 0;
+
 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
@@ -1261,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
+	if (dm_table_supports_flush(t, REQ_FLUSH)) {
+		flush |= REQ_FLUSH;
+		if (dm_table_supports_flush(t, REQ_FUA))
+			flush |= REQ_FUA;
+	}
+	blk_queue_flush(q, flush);
+
 	dm_table_set_integrity(t);
 
 	/*
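[Editorial note: dm_table_supports_flush() cannot scan t->devices directly,
since that list includes internal dm devices such as mirror logs, so it
delegates to each target's iterate_devices hook. The sketch below is a
user-space model of that visitor-style callback; the toy_* names are
hypothetical stand-ins, not kernel APIs, and only the shape of the callback
and iteration mirrors device_flush_capable() and iterate_devices() above.]

/* User-space model of the iterate_devices visitor pattern used above.
 * All types and names here are illustrative stand-ins, not kernel APIs. */
#include <stddef.h>
#include <stdio.h>

struct toy_dev {
	unsigned flush_flags;
};

struct toy_target {
	struct toy_dev *devs;
	size_t ndevs;
};

/* Callback shape: return nonzero to report a match for the capability
 * being probed, analogous to device_flush_capable() in the hunk above. */
typedef int (*toy_iterate_fn)(struct toy_target *ti, struct toy_dev *dev,
			      void *data);

/* Analogous to ti->type->iterate_devices(): apply fn to each underlying
 * device and report whether any of them matched. */
static int toy_iterate_devices(struct toy_target *ti, toy_iterate_fn fn,
			       void *data)
{
	for (size_t i = 0; i < ti->ndevs; i++)
		if (fn(ti, &ti->devs[i], data))
			return 1;
	return 0;
}

static int toy_flush_capable(struct toy_target *ti, struct toy_dev *dev,
			     void *data)
{
	unsigned flush = *(unsigned *)data;

	return dev->flush_flags & flush;
}

int main(void)
{
	struct toy_dev devs[] = { { .flush_flags = 0 }, { .flush_flags = 1 } };
	struct toy_target ti = { .devs = devs, .ndevs = 2 };
	unsigned flush = 1;	/* stand-in for REQ_FLUSH */

	/* Prints 1: the second device advertises the probed capability. */
	printf("flush capable: %d\n",
	       toy_iterate_devices(&ti, toy_flush_capable, &flush));
	return 0;
}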
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1000eaf984ef..52b39f335bb3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1808,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
 
 /*