author     Mike Snitzer <snitzer@redhat.com>        2010-08-11 23:14:04 -0400
committer  Alasdair G Kergon <agk@redhat.com>       2010-08-11 23:14:04 -0400
commit     57cba5d3658d9fdc019c6af14a2d80aefa651e56 (patch)
tree       4905a162b6785e1a1228b8870d8011cf9035147a /drivers
parent     26803b9f06d365122fae82e7554a66ef8278e0bb (diff)
dm: rename map_info flush_request to target_request_nr
'target_request_nr' is a more generic name that reflects the fact that
it will be used for both flush and discard support.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
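
For orientation, the renamed field is the per-clone member of union map_info that the core hands to a target's map method. The include/ side of the rename is outside this drivers-limited diffstat, so the following is only an approximate sketch of the union's shape after the change, not part of the patch:

/*
 * Approximate sketch; the real header change is not shown in this
 * drivers-only diffstat.
 */
union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned target_request_nr;	/* previously: flush_request */
};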
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-snap.c   |  2
-rw-r--r--  drivers/md/dm-stripe.c |  6
-rw-r--r--  drivers/md/dm.c        | 18
3 files changed, 14 insertions, 12 deletions
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 96feada5e761..5974d3094d97 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1692,7 +1692,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
 	chunk_t chunk;
 
 	if (unlikely(bio_empty_barrier(bio))) {
-		if (!map_context->flush_request)
+		if (!map_context->target_request_nr)
 			bio->bi_bdev = s->origin->bdev;
 		else
 			bio->bi_bdev = s->cow->bdev;
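
With the rename, snapshot_merge_map() still routes per-target flush clone 0 to the origin and any other clone to the COW device. As an illustrative sketch (the merge target's real constructor is not part of this hunk), a target that wants one empty-barrier clone per underlying device advertises the count, and the core then passes 0..count-1 in target_request_nr:

/* Hypothetical constructor, shown only to illustrate the numbering contract. */
static int example_merge_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* device parsing elided */
	ti->num_flush_requests = 2;	/* clone 0 -> origin, clone 1 -> cow */
	return 0;
}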
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d6e28d732b4d..22d5e2fdab8b 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -213,10 +213,12 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
 	struct stripe_c *sc = (struct stripe_c *) ti->private;
 	sector_t offset, chunk;
 	uint32_t stripe;
+	unsigned target_request_nr;
 
 	if (unlikely(bio_empty_barrier(bio))) {
-		BUG_ON(map_context->flush_request >= sc->stripes);
-		bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+		target_request_nr = map_context->target_request_nr;
+		BUG_ON(target_request_nr >= sc->stripes);
+		bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
 
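
stripe_map() uses the number directly as a stripe index, which is where the more generic name pays off: the same per-target numbering can later drive discard clones as well as flushes. A hypothetical helper, derived only from the code visible in this hunk, makes the lookup explicit:

/*
 * Hypothetical helper (not in the patch): map a per-target request
 * number to a stripe's underlying block device, as stripe_map() now
 * does inline via its target_request_nr local.
 */
static struct block_device *stripe_nr_to_bdev(struct stripe_c *sc,
					      unsigned target_request_nr)
{
	BUG_ON(target_request_nr >= sc->stripes);
	return sc->stripe[target_request_nr].dev->bdev;
}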
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5ae0a05b4811..0d4710175885 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1183,12 +1183,12 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 }
 
 static void __flush_target(struct clone_info *ci, struct dm_target *ti,
-			   unsigned flush_nr)
+			   unsigned request_nr)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti);
 	struct bio *clone;
 
-	tio->info.flush_request = flush_nr;
+	tio->info.target_request_nr = request_nr;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
 	__bio_clone(clone, ci->bio);
@@ -1199,13 +1199,13 @@ static void __flush_target(struct clone_info *ci, struct dm_target *ti,
 
 static int __clone_and_map_empty_barrier(struct clone_info *ci)
 {
-	unsigned target_nr = 0, flush_nr;
+	unsigned target_nr = 0, request_nr;
 	struct dm_target *ti;
 
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
-		     flush_nr++)
-			__flush_target(ci, ti, flush_nr);
+		for (request_nr = 0; request_nr < ti->num_flush_requests;
+		     request_nr++)
+			__flush_target(ci, ti, request_nr);
 
 	ci->sector_count = 0;
 
@@ -2424,11 +2424,11 @@ static void dm_queue_flush(struct mapped_device *md)
 	queue_work(md->wq, &md->work);
 }
 
-static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
+static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
-	tio->info.flush_request = flush_nr;
+	tio->info.target_request_nr = request_nr;
 }
 
 /* Issue barrier requests to targets and wait for their completion. */
@@ -2446,7 +2446,7 @@ static int dm_rq_barrier(struct mapped_device *md)
 		ti = dm_table_get_target(map, i);
 		for (j = 0; j < ti->num_flush_requests; j++) {
 			clone = clone_rq(md->flush_request, md, GFP_NOIO);
-			dm_rq_set_flush_nr(clone, j);
+			dm_rq_set_target_request_nr(clone, j);
 			atomic_inc(&md->pending[rq_data_dir(clone)]);
 			map_request(ti, clone, md);
 		}
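
The request-based path mirrors the bio-based one: dm_rq_barrier() clones the suspended flush request once per ti->num_flush_requests and tags each clone before mapping it. The sketch below, with a hypothetical target and function name, shows how a request-based map_rq method could read the number back (assumes <linux/device-mapper.h>):

/* Sketch only: hypothetical request-based target consuming the number. */
static int example_map_rq(struct dm_target *ti, struct request *clone,
			  union map_info *map_context)
{
	/* which of the ti->num_flush_requests clones this is: 0..n-1 */
	unsigned request_nr = map_context->target_request_nr;

	return request_nr < ti->num_flush_requests ? DM_MAPIO_REMAPPED : -EIO;
}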