aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorJoe Thornber <ejt@redhat.com>2013-11-08 11:39:50 -0500
committerMike Snitzer <snitzer@redhat.com>2013-11-11 11:37:51 -0500
commit65790ff919e2e07ccb4457415c11075b245d643b (patch)
tree9e2e0c667cf0fe62d72c1c37514d2535aa52ba5b /drivers/md
parent532906aa7f9656209f30f08dfadd328fc1bc6912 (diff)
dm cache: add cache block invalidation support
Cache block invalidation is removing an entry from the cache without writing it back. Cache blocks can be invalidated via the 'invalidate_cblocks' message, which takes an arbitrary number of cblock ranges: invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]* E.g. dmsetup message my_cache 0 invalidate_cblocks 2345 3456-4567 5678-6789 Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-cache-target.c225
1 file changed, 222 insertions, 3 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8c0217753cc5..41e664b474f1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -150,6 +150,25 @@ struct cache_stats {
150 atomic_t discard_count; 150 atomic_t discard_count;
151}; 151};
152 152
153/*
154 * Defines a range of cblocks, begin to (end - 1) are in the range. end is
155 * the one-past-the-end value.
156 */
157struct cblock_range {
158 dm_cblock_t begin;
159 dm_cblock_t end;
160};
161
162struct invalidation_request {
163 struct list_head list;
164 struct cblock_range *cblocks;
165
166 atomic_t complete;
167 int err;
168
169 wait_queue_head_t result_wait;
170};
171
153struct cache { 172struct cache {
154 struct dm_target *ti; 173 struct dm_target *ti;
155 struct dm_target_callbacks callbacks; 174 struct dm_target_callbacks callbacks;
@@ -241,6 +260,7 @@ struct cache {
241 260
242 bool need_tick_bio:1; 261 bool need_tick_bio:1;
243 bool sized:1; 262 bool sized:1;
263 bool invalidate:1;
244 bool commit_requested:1; 264 bool commit_requested:1;
245 bool loaded_mappings:1; 265 bool loaded_mappings:1;
246 bool loaded_discards:1; 266 bool loaded_discards:1;
@@ -251,6 +271,12 @@ struct cache {
251 struct cache_features features; 271 struct cache_features features;
252 272
253 struct cache_stats stats; 273 struct cache_stats stats;
274
275 /*
276 * Invalidation fields.
277 */
278 spinlock_t invalidation_lock;
279 struct list_head invalidation_requests;
254}; 280};
255 281
256struct per_bio_data { 282struct per_bio_data {
@@ -283,6 +309,7 @@ struct dm_cache_migration {
283 bool demote:1; 309 bool demote:1;
284 bool promote:1; 310 bool promote:1;
285 bool requeue_holder:1; 311 bool requeue_holder:1;
312 bool invalidate:1;
286 313
287 struct dm_bio_prison_cell *old_ocell; 314 struct dm_bio_prison_cell *old_ocell;
288 struct dm_bio_prison_cell *new_ocell; 315 struct dm_bio_prison_cell *new_ocell;
@@ -904,8 +931,11 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
904 list_add_tail(&mg->list, &cache->quiesced_migrations); 931 list_add_tail(&mg->list, &cache->quiesced_migrations);
905 spin_unlock_irqrestore(&cache->lock, flags); 932 spin_unlock_irqrestore(&cache->lock, flags);
906 933
907 } else 934 } else {
935 if (mg->invalidate)
936 policy_remove_mapping(cache->policy, mg->old_oblock);
908 cleanup_migration(mg); 937 cleanup_migration(mg);
938 }
909 939
910 } else { 940 } else {
911 if (mg->requeue_holder) 941 if (mg->requeue_holder)
@@ -1115,6 +1145,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
1115 mg->demote = false; 1145 mg->demote = false;
1116 mg->promote = true; 1146 mg->promote = true;
1117 mg->requeue_holder = true; 1147 mg->requeue_holder = true;
1148 mg->invalidate = false;
1118 mg->cache = cache; 1149 mg->cache = cache;
1119 mg->new_oblock = oblock; 1150 mg->new_oblock = oblock;
1120 mg->cblock = cblock; 1151 mg->cblock = cblock;
@@ -1137,6 +1168,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
1137 mg->demote = false; 1168 mg->demote = false;
1138 mg->promote = false; 1169 mg->promote = false;
1139 mg->requeue_holder = true; 1170 mg->requeue_holder = true;
1171 mg->invalidate = false;
1140 mg->cache = cache; 1172 mg->cache = cache;
1141 mg->old_oblock = oblock; 1173 mg->old_oblock = oblock;
1142 mg->cblock = cblock; 1174 mg->cblock = cblock;
@@ -1161,6 +1193,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1161 mg->demote = true; 1193 mg->demote = true;
1162 mg->promote = true; 1194 mg->promote = true;
1163 mg->requeue_holder = true; 1195 mg->requeue_holder = true;
1196 mg->invalidate = false;
1164 mg->cache = cache; 1197 mg->cache = cache;
1165 mg->old_oblock = old_oblock; 1198 mg->old_oblock = old_oblock;
1166 mg->new_oblock = new_oblock; 1199 mg->new_oblock = new_oblock;
@@ -1188,6 +1221,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
1188 mg->demote = true; 1221 mg->demote = true;
1189 mg->promote = false; 1222 mg->promote = false;
1190 mg->requeue_holder = true; 1223 mg->requeue_holder = true;
1224 mg->invalidate = true;
1191 mg->cache = cache; 1225 mg->cache = cache;
1192 mg->old_oblock = oblock; 1226 mg->old_oblock = oblock;
1193 mg->cblock = cblock; 1227 mg->cblock = cblock;
@@ -1525,6 +1559,58 @@ static void writeback_some_dirty_blocks(struct cache *cache)
1525} 1559}
1526 1560
1527/*---------------------------------------------------------------- 1561/*----------------------------------------------------------------
1562 * Invalidations.
1563 * Dropping something from the cache *without* writing back.
1564 *--------------------------------------------------------------*/
1565
1566static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
1567{
1568 int r = 0;
1569 uint64_t begin = from_cblock(req->cblocks->begin);
1570 uint64_t end = from_cblock(req->cblocks->end);
1571
1572 while (begin != end) {
1573 r = policy_remove_cblock(cache->policy, to_cblock(begin));
1574 if (!r) {
1575 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
1576 if (r)
1577 break;
1578
1579 } else if (r == -ENODATA) {
1580 /* harmless, already unmapped */
1581 r = 0;
1582
1583 } else {
1584 DMERR("policy_remove_cblock failed");
1585 break;
1586 }
1587
1588 begin++;
1589 }
1590
1591 cache->commit_requested = true;
1592
1593 req->err = r;
1594 atomic_set(&req->complete, 1);
1595
1596 wake_up(&req->result_wait);
1597}
1598
1599static void process_invalidation_requests(struct cache *cache)
1600{
1601 struct list_head list;
1602 struct invalidation_request *req, *tmp;
1603
1604 INIT_LIST_HEAD(&list);
1605 spin_lock(&cache->invalidation_lock);
1606 list_splice_init(&cache->invalidation_requests, &list);
1607 spin_unlock(&cache->invalidation_lock);
1608
1609 list_for_each_entry_safe (req, tmp, &list, list)
1610 process_invalidation_request(cache, req);
1611}
1612
1613/*----------------------------------------------------------------
1528 * Main worker loop 1614 * Main worker loop
1529 *--------------------------------------------------------------*/ 1615 *--------------------------------------------------------------*/
1530static bool is_quiescing(struct cache *cache) 1616static bool is_quiescing(struct cache *cache)
@@ -1593,7 +1679,8 @@ static int more_work(struct cache *cache)
1593 !bio_list_empty(&cache->deferred_writethrough_bios) || 1679 !bio_list_empty(&cache->deferred_writethrough_bios) ||
1594 !list_empty(&cache->quiesced_migrations) || 1680 !list_empty(&cache->quiesced_migrations) ||
1595 !list_empty(&cache->completed_migrations) || 1681 !list_empty(&cache->completed_migrations) ||
1596 !list_empty(&cache->need_commit_migrations); 1682 !list_empty(&cache->need_commit_migrations) ||
1683 cache->invalidate;
1597} 1684}
1598 1685
1599static void do_worker(struct work_struct *ws) 1686static void do_worker(struct work_struct *ws)
@@ -1605,6 +1692,7 @@ static void do_worker(struct work_struct *ws)
1605 writeback_some_dirty_blocks(cache); 1692 writeback_some_dirty_blocks(cache);
1606 process_deferred_writethrough_bios(cache); 1693 process_deferred_writethrough_bios(cache);
1607 process_deferred_bios(cache); 1694 process_deferred_bios(cache);
1695 process_invalidation_requests(cache);
1608 } 1696 }
1609 1697
1610 process_migrations(cache, &cache->quiesced_migrations, issue_copy); 1698 process_migrations(cache, &cache->quiesced_migrations, issue_copy);
@@ -2271,6 +2359,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2271 2359
2272 cache->need_tick_bio = true; 2360 cache->need_tick_bio = true;
2273 cache->sized = false; 2361 cache->sized = false;
2362 cache->invalidate = false;
2274 cache->commit_requested = false; 2363 cache->commit_requested = false;
2275 cache->loaded_mappings = false; 2364 cache->loaded_mappings = false;
2276 cache->loaded_discards = false; 2365 cache->loaded_discards = false;
@@ -2284,6 +2373,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2284 atomic_set(&cache->stats.commit_count, 0); 2373 atomic_set(&cache->stats.commit_count, 0);
2285 atomic_set(&cache->stats.discard_count, 0); 2374 atomic_set(&cache->stats.discard_count, 0);
2286 2375
2376 spin_lock_init(&cache->invalidation_lock);
2377 INIT_LIST_HEAD(&cache->invalidation_requests);
2378
2287 *result = cache; 2379 *result = cache;
2288 return 0; 2380 return 0;
2289 2381
@@ -2833,7 +2925,128 @@ err:
2833} 2925}
2834 2926
2835/* 2927/*
2836 * Supports <key> <value>. 2928 * A cache block range can take two forms:
2929 *
2930 * i) A single cblock, eg. '3456'
2931 * ii) A begin and end cblock with a dash between, eg. 123-234
2932 */
2933static int parse_cblock_range(struct cache *cache, const char *str,
2934 struct cblock_range *result)
2935{
2936 char dummy;
2937 uint64_t b, e;
2938 int r;
2939
2940 /*
2941 * Try and parse form (ii) first.
2942 */
2943 r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
2944 if (r < 0)
2945 return r;
2946
2947 if (r == 2) {
2948 result->begin = to_cblock(b);
2949 result->end = to_cblock(e);
2950 return 0;
2951 }
2952
2953 /*
2954 * That didn't work, try form (i).
2955 */
2956 r = sscanf(str, "%llu%c", &b, &dummy);
2957 if (r < 0)
2958 return r;
2959
2960 if (r == 1) {
2961 result->begin = to_cblock(b);
2962 result->end = to_cblock(from_cblock(result->begin) + 1u);
2963 return 0;
2964 }
2965
2966 DMERR("invalid cblock range '%s'", str);
2967 return -EINVAL;
2968}
2969
2970static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
2971{
2972 uint64_t b = from_cblock(range->begin);
2973 uint64_t e = from_cblock(range->end);
2974 uint64_t n = from_cblock(cache->cache_size);
2975
2976 if (b >= n) {
2977 DMERR("begin cblock out of range: %llu >= %llu", b, n);
2978 return -EINVAL;
2979 }
2980
2981 if (e > n) {
2982 DMERR("end cblock out of range: %llu > %llu", e, n);
2983 return -EINVAL;
2984 }
2985
2986 if (b >= e) {
2987 DMERR("invalid cblock range: %llu >= %llu", b, e);
2988 return -EINVAL;
2989 }
2990
2991 return 0;
2992}
2993
2994static int request_invalidation(struct cache *cache, struct cblock_range *range)
2995{
2996 struct invalidation_request req;
2997
2998 INIT_LIST_HEAD(&req.list);
2999 req.cblocks = range;
3000 atomic_set(&req.complete, 0);
3001 req.err = 0;
3002 init_waitqueue_head(&req.result_wait);
3003
3004 spin_lock(&cache->invalidation_lock);
3005 list_add(&req.list, &cache->invalidation_requests);
3006 spin_unlock(&cache->invalidation_lock);
3007 wake_worker(cache);
3008
3009 wait_event(req.result_wait, atomic_read(&req.complete));
3010 return req.err;
3011}
3012
3013static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3014 const char **cblock_ranges)
3015{
3016 int r = 0;
3017 unsigned i;
3018 struct cblock_range range;
3019
3020 if (!passthrough_mode(&cache->features)) {
3021 DMERR("cache has to be in passthrough mode for invalidation");
3022 return -EPERM;
3023 }
3024
3025 for (i = 0; i < count; i++) {
3026 r = parse_cblock_range(cache, cblock_ranges[i], &range);
3027 if (r)
3028 break;
3029
3030 r = validate_cblock_range(cache, &range);
3031 if (r)
3032 break;
3033
3034 /*
3035 * Pass begin and end cache blocks to the worker and wake it.
3036 */
3037 r = request_invalidation(cache, &range);
3038 if (r)
3039 break;
3040 }
3041
3042 return r;
3043}
3044
3045/*
3046 * Supports
3047 * "<key> <value>"
3048 * and
3049 * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
2837 * 3050 *
2838 * The key migration_threshold is supported by the cache target core. 3051 * The key migration_threshold is supported by the cache target core.
2839 */ 3052 */
@@ -2841,6 +3054,12 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2841{ 3054{
2842 struct cache *cache = ti->private; 3055 struct cache *cache = ti->private;
2843 3056
3057 if (!argc)
3058 return -EINVAL;
3059
3060 if (!strcmp(argv[0], "invalidate_cblocks"))
3061 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3062
2844 if (argc != 2) 3063 if (argc != 2)
2845 return -EINVAL; 3064 return -EINVAL;
2846 3065