diff options
author | Mikulas Patocka <mpatocka@redhat.com> | 2013-11-15 16:12:51 -0500 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2014-01-07 10:11:45 -0500 |
commit | 42065460aed7201ec8adf0179a258a23bd1ebd78 (patch) | |
tree | 037490ed423cfce470971ca5dc38cc36008b91b0 /drivers/md/dm-delay.c | |
parent | 57a2f238564e0700c8648238d31f366246a5b963 (diff) |
dm delay: use per-bio data instead of a mempool and slab cache
Starting with commit c0820cf5ad095 ("dm: introduce per_bio_data"),
device mapper has the capability to pre-allocate a target-specific
structure with the bio.
This patch changes dm-delay to use this facility instead of a slab cache
and mempool.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md/dm-delay.c')
-rw-r--r-- | drivers/md/dm-delay.c | 35 |
1 file changed, 7 insertions, 28 deletions
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 2f91d6d4a2cc..a8a511c053a5 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -24,7 +24,6 @@ struct delay_c { | |||
24 | struct work_struct flush_expired_bios; | 24 | struct work_struct flush_expired_bios; |
25 | struct list_head delayed_bios; | 25 | struct list_head delayed_bios; |
26 | atomic_t may_delay; | 26 | atomic_t may_delay; |
27 | mempool_t *delayed_pool; | ||
28 | 27 | ||
29 | struct dm_dev *dev_read; | 28 | struct dm_dev *dev_read; |
30 | sector_t start_read; | 29 | sector_t start_read; |
@@ -40,14 +39,11 @@ struct delay_c { | |||
40 | struct dm_delay_info { | 39 | struct dm_delay_info { |
41 | struct delay_c *context; | 40 | struct delay_c *context; |
42 | struct list_head list; | 41 | struct list_head list; |
43 | struct bio *bio; | ||
44 | unsigned long expires; | 42 | unsigned long expires; |
45 | }; | 43 | }; |
46 | 44 | ||
47 | static DEFINE_MUTEX(delayed_bios_lock); | 45 | static DEFINE_MUTEX(delayed_bios_lock); |
48 | 46 | ||
49 | static struct kmem_cache *delayed_cache; | ||
50 | |||
51 | static void handle_delayed_timer(unsigned long data) | 47 | static void handle_delayed_timer(unsigned long data) |
52 | { | 48 | { |
53 | struct delay_c *dc = (struct delay_c *)data; | 49 | struct delay_c *dc = (struct delay_c *)data; |
@@ -87,13 +83,14 @@ static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) | |||
87 | mutex_lock(&delayed_bios_lock); | 83 | mutex_lock(&delayed_bios_lock); |
88 | list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { | 84 | list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { |
89 | if (flush_all || time_after_eq(jiffies, delayed->expires)) { | 85 | if (flush_all || time_after_eq(jiffies, delayed->expires)) { |
86 | struct bio *bio = dm_bio_from_per_bio_data(delayed, | ||
87 | sizeof(struct dm_delay_info)); | ||
90 | list_del(&delayed->list); | 88 | list_del(&delayed->list); |
91 | bio_list_add(&flush_bios, delayed->bio); | 89 | bio_list_add(&flush_bios, bio); |
92 | if ((bio_data_dir(delayed->bio) == WRITE)) | 90 | if ((bio_data_dir(bio) == WRITE)) |
93 | delayed->context->writes--; | 91 | delayed->context->writes--; |
94 | else | 92 | else |
95 | delayed->context->reads--; | 93 | delayed->context->reads--; |
96 | mempool_free(delayed, dc->delayed_pool); | ||
97 | continue; | 94 | continue; |
98 | } | 95 | } |
99 | 96 | ||
@@ -185,12 +182,6 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
185 | } | 182 | } |
186 | 183 | ||
187 | out: | 184 | out: |
188 | dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache); | ||
189 | if (!dc->delayed_pool) { | ||
190 | DMERR("Couldn't create delayed bio pool."); | ||
191 | goto bad_dev_write; | ||
192 | } | ||
193 | |||
194 | dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); | 185 | dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); |
195 | if (!dc->kdelayd_wq) { | 186 | if (!dc->kdelayd_wq) { |
196 | DMERR("Couldn't start kdelayd"); | 187 | DMERR("Couldn't start kdelayd"); |
@@ -206,12 +197,11 @@ out: | |||
206 | 197 | ||
207 | ti->num_flush_bios = 1; | 198 | ti->num_flush_bios = 1; |
208 | ti->num_discard_bios = 1; | 199 | ti->num_discard_bios = 1; |
200 | ti->per_bio_data_size = sizeof(struct dm_delay_info); | ||
209 | ti->private = dc; | 201 | ti->private = dc; |
210 | return 0; | 202 | return 0; |
211 | 203 | ||
212 | bad_queue: | 204 | bad_queue: |
213 | mempool_destroy(dc->delayed_pool); | ||
214 | bad_dev_write: | ||
215 | if (dc->dev_write) | 205 | if (dc->dev_write) |
216 | dm_put_device(ti, dc->dev_write); | 206 | dm_put_device(ti, dc->dev_write); |
217 | bad_dev_read: | 207 | bad_dev_read: |
@@ -232,7 +222,6 @@ static void delay_dtr(struct dm_target *ti) | |||
232 | if (dc->dev_write) | 222 | if (dc->dev_write) |
233 | dm_put_device(ti, dc->dev_write); | 223 | dm_put_device(ti, dc->dev_write); |
234 | 224 | ||
235 | mempool_destroy(dc->delayed_pool); | ||
236 | kfree(dc); | 225 | kfree(dc); |
237 | } | 226 | } |
238 | 227 | ||
@@ -244,10 +233,9 @@ static int delay_bio(struct delay_c *dc, int delay, struct bio *bio) | |||
244 | if (!delay || !atomic_read(&dc->may_delay)) | 233 | if (!delay || !atomic_read(&dc->may_delay)) |
245 | return 1; | 234 | return 1; |
246 | 235 | ||
247 | delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO); | 236 | delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info)); |
248 | 237 | ||
249 | delayed->context = dc; | 238 | delayed->context = dc; |
250 | delayed->bio = bio; | ||
251 | delayed->expires = expires = jiffies + (delay * HZ / 1000); | 239 | delayed->expires = expires = jiffies + (delay * HZ / 1000); |
252 | 240 | ||
253 | mutex_lock(&delayed_bios_lock); | 241 | mutex_lock(&delayed_bios_lock); |
@@ -356,13 +344,7 @@ static struct target_type delay_target = { | |||
356 | 344 | ||
357 | static int __init dm_delay_init(void) | 345 | static int __init dm_delay_init(void) |
358 | { | 346 | { |
359 | int r = -ENOMEM; | 347 | int r; |
360 | |||
361 | delayed_cache = KMEM_CACHE(dm_delay_info, 0); | ||
362 | if (!delayed_cache) { | ||
363 | DMERR("Couldn't create delayed bio cache."); | ||
364 | goto bad_memcache; | ||
365 | } | ||
366 | 348 | ||
367 | r = dm_register_target(&delay_target); | 349 | r = dm_register_target(&delay_target); |
368 | if (r < 0) { | 350 | if (r < 0) { |
@@ -373,15 +355,12 @@ static int __init dm_delay_init(void) | |||
373 | return 0; | 355 | return 0; |
374 | 356 | ||
375 | bad_register: | 357 | bad_register: |
376 | kmem_cache_destroy(delayed_cache); | ||
377 | bad_memcache: | ||
378 | return r; | 358 | return r; |
379 | } | 359 | } |
380 | 360 | ||
381 | static void __exit dm_delay_exit(void) | 361 | static void __exit dm_delay_exit(void) |
382 | { | 362 | { |
383 | dm_unregister_target(&delay_target); | 363 | dm_unregister_target(&delay_target); |
384 | kmem_cache_destroy(delayed_cache); | ||
385 | } | 364 | } |
386 | 365 | ||
387 | /* Module hooks */ | 366 | /* Module hooks */ |