diff options
author | Damien Le Moal <damien.lemoal@wdc.com> | 2018-10-17 05:05:07 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2018-10-18 15:15:23 -0400 |
commit | 33c2865f8d011a2ca9f67124ddab9dc89382e9f1 (patch) | |
tree | 5680e9ef136dd26d8d313380bbd87f758128d349 /drivers/md/dm-zoned-metadata.c | |
parent | d857ad75edf3c0066fcd920746f9dc75382b3324 (diff) |
dm zoned: fix metadata block ref counting
Since the ref field of struct dmz_mblock is always used with the
spinlock of struct dmz_metadata locked, there is no need to use an
atomic_t type. Change the type of the ref field to an unsigned
integer.
Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-zoned-metadata.c')
-rw-r--r-- | drivers/md/dm-zoned-metadata.c | 20 |
1 file changed, 11 insertions, 9 deletions
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 969954915566..67b71f6e3bda 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c | |||
@@ -99,7 +99,7 @@ struct dmz_mblock { | |||
99 | struct rb_node node; | 99 | struct rb_node node; |
100 | struct list_head link; | 100 | struct list_head link; |
101 | sector_t no; | 101 | sector_t no; |
102 | atomic_t ref; | 102 | unsigned int ref; |
103 | unsigned long state; | 103 | unsigned long state; |
104 | struct page *page; | 104 | struct page *page; |
105 | void *data; | 105 | void *data; |
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd, | |||
296 | 296 | ||
297 | RB_CLEAR_NODE(&mblk->node); | 297 | RB_CLEAR_NODE(&mblk->node); |
298 | INIT_LIST_HEAD(&mblk->link); | 298 | INIT_LIST_HEAD(&mblk->link); |
299 | atomic_set(&mblk->ref, 0); | 299 | mblk->ref = 0; |
300 | mblk->state = 0; | 300 | mblk->state = 0; |
301 | mblk->no = mblk_no; | 301 | mblk->no = mblk_no; |
302 | mblk->data = page_address(mblk->page); | 302 | mblk->data = page_address(mblk->page); |
@@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, | |||
397 | return NULL; | 397 | return NULL; |
398 | 398 | ||
399 | spin_lock(&zmd->mblk_lock); | 399 | spin_lock(&zmd->mblk_lock); |
400 | atomic_inc(&mblk->ref); | 400 | mblk->ref++; |
401 | set_bit(DMZ_META_READING, &mblk->state); | 401 | set_bit(DMZ_META_READING, &mblk->state); |
402 | dmz_insert_mblock(zmd, mblk); | 402 | dmz_insert_mblock(zmd, mblk); |
403 | spin_unlock(&zmd->mblk_lock); | 403 | spin_unlock(&zmd->mblk_lock); |
@@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd, | |||
484 | 484 | ||
485 | spin_lock(&zmd->mblk_lock); | 485 | spin_lock(&zmd->mblk_lock); |
486 | 486 | ||
487 | if (atomic_dec_and_test(&mblk->ref)) { | 487 | mblk->ref--; |
488 | if (mblk->ref == 0) { | ||
488 | if (test_bit(DMZ_META_ERROR, &mblk->state)) { | 489 | if (test_bit(DMZ_META_ERROR, &mblk->state)) { |
489 | rb_erase(&mblk->node, &zmd->mblk_rbtree); | 490 | rb_erase(&mblk->node, &zmd->mblk_rbtree); |
490 | dmz_free_mblock(zmd, mblk); | 491 | dmz_free_mblock(zmd, mblk); |
@@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, | |||
511 | mblk = dmz_lookup_mblock(zmd, mblk_no); | 512 | mblk = dmz_lookup_mblock(zmd, mblk_no); |
512 | if (mblk) { | 513 | if (mblk) { |
513 | /* Cache hit: remove block from LRU list */ | 514 | /* Cache hit: remove block from LRU list */ |
514 | if (atomic_inc_return(&mblk->ref) == 1 && | 515 | mblk->ref++; |
516 | if (mblk->ref == 1 && | ||
515 | !test_bit(DMZ_META_DIRTY, &mblk->state)) | 517 | !test_bit(DMZ_META_DIRTY, &mblk->state)) |
516 | list_del_init(&mblk->link); | 518 | list_del_init(&mblk->link); |
517 | } | 519 | } |
@@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) | |||
753 | 755 | ||
754 | spin_lock(&zmd->mblk_lock); | 756 | spin_lock(&zmd->mblk_lock); |
755 | clear_bit(DMZ_META_DIRTY, &mblk->state); | 757 | clear_bit(DMZ_META_DIRTY, &mblk->state); |
756 | if (atomic_read(&mblk->ref) == 0) | 758 | if (mblk->ref == 0) |
757 | list_add_tail(&mblk->link, &zmd->mblk_lru_list); | 759 | list_add_tail(&mblk->link, &zmd->mblk_lru_list); |
758 | spin_unlock(&zmd->mblk_lock); | 760 | spin_unlock(&zmd->mblk_lock); |
759 | } | 761 | } |
@@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) | |||
2308 | mblk = list_first_entry(&zmd->mblk_dirty_list, | 2310 | mblk = list_first_entry(&zmd->mblk_dirty_list, |
2309 | struct dmz_mblock, link); | 2311 | struct dmz_mblock, link); |
2310 | dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", | 2312 | dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", |
2311 | (u64)mblk->no, atomic_read(&mblk->ref)); | 2313 | (u64)mblk->no, mblk->ref); |
2312 | list_del_init(&mblk->link); | 2314 | list_del_init(&mblk->link); |
2313 | rb_erase(&mblk->node, &zmd->mblk_rbtree); | 2315 | rb_erase(&mblk->node, &zmd->mblk_rbtree); |
2314 | dmz_free_mblock(zmd, mblk); | 2316 | dmz_free_mblock(zmd, mblk); |
@@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) | |||
2326 | root = &zmd->mblk_rbtree; | 2328 | root = &zmd->mblk_rbtree; |
2327 | rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { | 2329 | rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { |
2328 | dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", | 2330 | dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", |
2329 | (u64)mblk->no, atomic_read(&mblk->ref)); | 2331 | (u64)mblk->no, mblk->ref); |
2330 | atomic_set(&mblk->ref, 0); | 2332 | mblk->ref = 0; |
2331 | dmz_free_mblock(zmd, mblk); | 2333 | dmz_free_mblock(zmd, mblk); |
2332 | } | 2334 | } |
2333 | 2335 | ||