about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorJoe Thornber <ejt@redhat.com>2012-07-27 10:08:09 -0400
committerAlasdair G Kergon <agk@redhat.com>2012-07-27 10:08:09 -0400
commit3c9ad9bd87b03032999ddbeb44bdf7938f7dbd57 (patch)
tree358c25628410d272958baca08a1975a8b213f34b /drivers
parent384ef0e62e409e52c80adef5b1ff83075377c19e (diff)
dm persistent data: stop using dm_bm_unlock_move when shadowing blocks in tm
Stop using dm_bm_unlock_move when shadowing blocks in the transaction manager as an optimisation and remove the function as it is then no longer used. Some code, such as the space maps, keeps using on-disk data structures from the previous transaction. It can do this because blocks won't be reallocated until the subsequent transaction. Using dm_bm_unlock_move to copy blocks sounds like a win, but it forces a synchronous read should the old block be accessed. Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c16
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h8
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c17
3 files changed, 14 insertions, 27 deletions
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 4b5c504f47a..ad1712e802f 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -584,22 +584,6 @@ int dm_bm_unlock(struct dm_block *b)
584} 584}
585EXPORT_SYMBOL_GPL(dm_bm_unlock); 585EXPORT_SYMBOL_GPL(dm_bm_unlock);
586 586
587int dm_bm_unlock_move(struct dm_block *b, dm_block_t n)
588{
589 struct buffer_aux *aux;
590
591 aux = dm_bufio_get_aux_data(to_buffer(b));
592
593 if (aux->write_locked) {
594 dm_bufio_mark_buffer_dirty(to_buffer(b));
595 bl_up_write(&aux->lock);
596 } else
597 bl_up_read(&aux->lock);
598
599 dm_bufio_release_move(to_buffer(b), n);
600 return 0;
601}
602
603int dm_bm_flush_and_unlock(struct dm_block_manager *bm, 587int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
604 struct dm_block *superblock) 588 struct dm_block *superblock)
605{ 589{
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 924833d2dfa..32788556663 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -97,14 +97,6 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
97int dm_bm_unlock(struct dm_block *b); 97int dm_bm_unlock(struct dm_block *b);
98 98
99/* 99/*
100 * An optimisation; we often want to copy a block's contents to a new
101 * block. eg, as part of the shadowing operation. It's far better for
102 * bufio to do this move behind the scenes than hold 2 locks and memcpy the
103 * data.
104 */
105int dm_bm_unlock_move(struct dm_block *b, dm_block_t n);
106
107/*
108 * It's a common idiom to have a superblock that should be committed last. 100 * It's a common idiom to have a superblock that should be committed last.
109 * 101 *
110 * @superblock should be write-locked on entry. It will be unlocked during 102 * @superblock should be write-locked on entry. It will be unlocked during
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index b4f05830af0..d247a35da3c 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -219,13 +219,24 @@ static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
219 if (r < 0) 219 if (r < 0)
220 return r; 220 return r;
221 221
222 r = dm_bm_unlock_move(orig_block, new); 222 /*
223 if (r < 0) { 223 * It would be tempting to use dm_bm_unlock_move here, but some
224 * code, such as the space maps, keeps using the old data structures
225 * secure in the knowledge they won't be changed until the next
226 * transaction. Using unlock_move would force a synchronous read
227 * since the old block would no longer be in the cache.
228 */
229 r = dm_bm_write_lock_zero(tm->bm, new, v, result);
230 if (r) {
224 dm_bm_unlock(orig_block); 231 dm_bm_unlock(orig_block);
225 return r; 232 return r;
226 } 233 }
227 234
228 return dm_bm_write_lock(tm->bm, new, v, result); 235 memcpy(dm_block_data(*result), dm_block_data(orig_block),
236 dm_bm_block_size(tm->bm));
237
238 dm_bm_unlock(orig_block);
239 return r;
229} 240}
230 241
231int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, 242int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,