-rw-r--r--  drivers/md/dm-bufio.c                           16
-rw-r--r--  drivers/md/dm-cache-background-tracker.c         5
-rw-r--r--  drivers/md/dm-cache-policy-smq.c                31
-rw-r--r--  drivers/md/dm-cache-target.c                    27
-rw-r--r--  drivers/md/dm-mpath.c                           19
-rw-r--r--  drivers/md/dm-rq.c                               1
-rw-r--r--  drivers/md/dm-thin-metadata.c                    4
-rw-r--r--  drivers/md/persistent-data/dm-space-map-disk.c  15
8 files changed, 66 insertions, 52 deletions
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5db11a405129..cd8139593ccd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 	return true;
 }

-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-	return retain_bytes / c->block_size;
+	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }

 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
 	unsigned long count = nr_to_scan;
-	unsigned retain_target = get_retain_buffers(c);
+	unsigned long retain_target = get_retain_buffers(c);

 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 	struct dm_buffer *b, *tmp;
-	unsigned retain_target = get_retain_buffers(c);
-	unsigned count;
+	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
 	LIST_HEAD(write_list);

 	dm_bufio_lock(c);
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
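
Note: the hunks above widen dm_bufio_retain_bytes from unsigned to unsigned long (and the module parameter from uint to ulong), so retain values of 4GiB and larger no longer truncate, and they replace the division in get_retain_buffers() with a shift. A minimal worked example of that arithmetic, assuming block_size is the power of two 1 << (sectors_per_block_bits + SECTOR_SHIFT), with SECTOR_SHIFT = 9 (512-byte sectors):

        /* For 4KiB buffers: sectors_per_block_bits = 3, so the shift is
         * 3 + 9 = 12, and retain_bytes >> 12 equals retain_bytes / 4096,
         * but is now evaluated in unsigned long. */
        unsigned long retain_bytes = 1UL << 32;                 /* 4 GiB */
        unsigned long retain_buffers = retain_bytes >> (3 + 9); /* 1048576 */
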
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 9b1afdfb13f0..707233891291 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
 {
 	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);

+	if (!b) {
+		DMERR("couldn't create background_tracker");
+		return NULL;
+	}
+
 	b->max_work = max_work;
 	atomic_set(&b->pending_promotes, 0);
 	atomic_set(&b->pending_writebacks, 0);
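
Note: with the NULL check added above, btracker_create() now fails cleanly on a failed kmalloc() instead of dereferencing NULL. A hedged caller-side sketch (the max_work value and error style here are illustrative, not from this patch):

        struct background_tracker *bt = btracker_create(4096);
        if (!bt)
                return -ENOMEM; /* allocation failure is now reported as NULL */
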
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 72479bd61e11..e5eb9c9b4bc8 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated.  So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	unsigned nr_clean;
-
 	if (idle) {
 		/*
 		 * We'd like to clean everything.
@@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		return q_size(&mq->dirty) == 0u;
 	}

-	nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
-	return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-		percent_to_target(mq, CLEAN_TARGET);
+	/*
+	 * If we're busy we don't worry about cleaning at all.
+	 */
+	return true;
 }

-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;

-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
 	if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
 		return;

-	e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 	if (!e) {
-		if (!clean_target_met(mq, false))
+		if (!clean_target_met(mq, true))
 			queue_writeback(mq);
 		return;
 	}
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 	 * We always claim to be 'idle' to ensure some demotions happen
 	 * with continuous loads.
 	 */
-	if (!free_target_met(mq, true))
+	if (!free_target_met(mq))
 		queue_demotion(mq);
 	return;
 }
@@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
-			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+		if (!clean_target_met(mq, idle)) {
+			queue_writeback(mq);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);

@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
 	clear_pending(mq, e);
 	if (success) {
 		e->oblock = work->oblock;
+		e->level = NR_CACHE_LEVELS - 1;
 		push(mq, e);
 		// h, q, a
 	} else {
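
Note: the net effect of these smq hunks is that clean_target_met() now treats the clean target as met whenever the cache is busy (writeback is pursued only when idle, or when queue_demotion() finds no clean block to evict), free_target_met() loses its idle parameter, and on success the completed entry is pushed back at the top level (NR_CACHE_LEVELS - 1). A standalone sketch of the free-target arithmetic the surviving code relies on; the helper shape and the 25% figure are assumptions for illustration only:

        #include <stdbool.h>

        /* With a 1000-block cache and a 25% free target, demotions keep
         * being queued until free + queued-for-demotion reaches 250. */
        static bool free_target_met_example(unsigned cache_size, unsigned allocated,
                                            unsigned demotions_queued, unsigned target_pct)
        {
                unsigned nr_free = cache_size - allocated;
                unsigned target = cache_size * target_pct / 100u;

                return nr_free + demotions_queued >= target;
        }
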
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1db375f50a13..d682a0511381 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
 {
+	if (!len)
+		return;
+
 	iot->in_flight -= len;
 	if (!iot->in_flight)
 		iot->idle_time = jiffies;
@@ -474,7 +477,7 @@ struct cache {
 	spinlock_t invalidation_lock;
 	struct list_head invalidation_requests;

-	struct io_tracker origin_tracker;
+	struct io_tracker tracker;

 	struct work_struct commit_ws;
 	struct batcher committer;
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		bio_op(bio) != REQ_OP_DISCARD);
+	return bio_op(bio) != REQ_OP_DISCARD;
 }

 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
 	if (accountable_bio(cache, bio)) {
 		pb->len = bio_sectors(bio);
-		iot_io_begin(&cache->origin_tracker, pb->len);
+		iot_io_begin(&cache->tracker, pb->len);
 	}
 }

@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

-	iot_io_end(&cache->origin_tracker, pb->len);
+	iot_io_end(&cache->tracker, pb->len);
 }

 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 
 enum busy {
 	IDLE,
-	MODERATE,
 	BUSY
 };

 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-	bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+	bool idle = iot_idle_for(&cache->tracker, HZ);
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;

-	if (current_volume <= cache->migration_threshold)
-		return idle ? IDLE : MODERATE;
+	if (idle && current_volume <= cache->migration_threshold)
+		return IDLE;
 	else
-		return idle ? MODERATE : BUSY;
+		return BUSY;
 }

 static void inc_hit_counter(struct cache *cache, struct bio *bio)
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
 
 	for (;;) {
 		b = spare_migration_bandwidth(cache);
-		if (b == BUSY)
-			break;

 		r = policy_get_background_work(cache->policy, b == IDLE, &op);
 		if (r == -ENODATA)
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	batcher_init(&cache->committer, commit_op, cache,
 		     issue_op, cache, cache->wq);
-	iot_init(&cache->origin_tracker);
+	iot_init(&cache->tracker);

 	init_rwsem(&cache->background_work_lock);
 	prevent_background_work(cache);
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
 
 	cancel_delayed_work(&cache->waker);
 	flush_workqueue(cache->wq);
-	WARN_ON(cache->origin_tracker.in_flight);
+	WARN_ON(cache->tracker.in_flight);

 	/*
 	 * If it's a flush suspend there won't be any deferred bios, so this
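
Note: taken together, these hunks collapse the three-state IDLE/MODERATE/BUSY bandwidth estimate into two states, rename origin_tracker to tracker (it now accounts all non-discard bios, not just those to the origin device; the new !len guard in __iot_io_end() presumably keeps zero-length completions from unaccounted bios resetting the idle clock), and let check_migrations() always consult the policy instead of bailing out when BUSY. A minimal sketch of the simplified gate, restated outside the driver:

        typedef unsigned long long sector_t;    /* stand-in for the kernel type */

        enum busy { IDLE, BUSY };

        /* IDLE only when the device has been quiet for a while AND the
         * in-flight migration volume is under the threshold; the caller
         * passes (b == IDLE) through to policy_get_background_work(). */
        static enum busy gate(int idle, sector_t volume, sector_t threshold)
        {
                return (idle && volume <= threshold) ? IDLE : BUSY;
        }
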
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 926a6bcb32c8..3df056b73b66 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -447,7 +447,7 @@ failed:
  * it has been invoked.
  */
 #define dm_report_EIO(m) \
-({ \
+do { \
 	struct mapped_device *md = dm_table_get_md((m)->ti->table); \
 	\
 	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@@ -455,8 +455,7 @@ failed:
 		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
 		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
 		 dm_noflush_suspending((m)->ti)); \
-	-EIO; \
-})
+} while (0)

 /*
  * Map cloned requests (request-based multipath)
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_DELAY_REQUEUE;
-		return dm_report_EIO(m);	/* Failed */
+		dm_report_EIO(m);	/* Failed */
+		return DM_MAPIO_KILL;
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		if (pg_init_all_paths(m))
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_REQUEUE;
-		return dm_report_EIO(m);
+		dm_report_EIO(m);
+		return -EIO;
 	}

 	mpio->pgpath = pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 		if (error == -EIO)
-			error = dm_report_EIO(m);
+			dm_report_EIO(m);
 		/* complete with the original error */
 		r = DM_ENDIO_DONE;
 	}
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 	fail_path(mpio->pgpath);

 	if (atomic_read(&m->nr_valid_paths) == 0 &&
-	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-		return dm_report_EIO(m);
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+		dm_report_EIO(m);
+		return -EIO;
+	}

 	/* Queue for the daemon to resubmit */
 	dm_bio_restore(get_bio_details_from_bio(clone), clone);
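
Note: dm_report_EIO() changes from a GCC statement expression that evaluated to -EIO into a plain do/while(0) logging statement, so each call site above now returns its own value explicitly (DM_MAPIO_KILL for the request-based map path, -EIO for the bio paths) instead of funnelling everything through -EIO. A standalone contrast of the two macro shapes (illustrative names, not the driver's):

        #define LOG_EIO_EXPR(m) ({ report(m); -EIO; })          /* old: usable as a value */
        #define LOG_EIO_STMT(m) do { report(m); } while (0)     /* new: statement only */

        /* old style:  return LOG_EIO_EXPR(m);
         * new style:  LOG_EIO_STMT(m); return DM_MAPIO_KILL; */
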
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2af27026aa2e..b639fa7246ee 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
 	case DM_MAPIO_KILL:
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, -EIO);
+		break;
 	default:
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
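
Note: the added break is a straightforward fallthrough fix — without it, a DM_MAPIO_KILL result fell through into the default arm after killing the request and hit BUG(), crashing on what is a legitimate return value from a target's map function.
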
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 0f0251d0d337..d31d18d9727c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 	if (r < 0)
 		return r;

-	r = save_sm_roots(pmd);
+	r = dm_tm_pre_commit(pmd->tm);
 	if (r < 0)
 		return r;

-	r = dm_tm_pre_commit(pmd->tm);
+	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;

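
Note: the two calls above were simply the wrong way round: dm_tm_pre_commit() can still modify the space maps, so their on-disk roots are only stable after it runs, and saving the roots first risked embedding stale root locations in the initial superblock. Restated with the reasoning inline (same calls as the diff, comments added):

        r = dm_tm_pre_commit(pmd->tm);  /* flush outstanding space-map changes */
        if (r < 0)
                return r;

        r = save_sm_roots(pmd);         /* snapshot roots only once they are stable */
        if (r < 0)
                return r;
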
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a14325..32adf6b4a9c7 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+	int r;
+	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);

-	return sm_ll_dec(&smd->ll, b, &ev);
+	r = sm_ll_dec(&smd->ll, b, &ev);
+	if (!r && (ev == SM_FREE)) {
+		/*
+		 * It's only free if it's also free in the last
+		 * transaction.
+		 */
+		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+		if (!r && !old_count)
+			smd->nr_allocated_this_transaction--;
+	}
+
+	return r;
 }

 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
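
Note: the rewritten sm_disk_dec_block() keeps nr_allocated_this_transaction honest: when a decrement frees a block (ev == SM_FREE), the counter is reduced only if the block was also free in the previously committed transaction (old_count == 0), i.e. the block was both allocated and freed within the current transaction. A compact restatement of that rule, with a hypothetical struct for illustration:

        #include <stdint.h>

        struct sm_counts { unsigned long nr_allocated_this_transaction; };

        /* Called when a block's reference count has just dropped to zero;
         * old_count is its count in the last committed transaction. */
        static void account_free(struct sm_counts *c, uint32_t old_count)
        {
                if (old_count == 0)     /* born and freed in this transaction */
                        c->nr_allocated_this_transaction--;
        }
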