about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/md/dm-table.c11
-rw-r--r--drivers/md/dm.c33
-rw-r--r--drivers/md/dm.h2
-rw-r--r--include/linux/device-mapper.h30
4 files changed, 56 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6be58b696377..daf25d0890b3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -967,13 +967,22 @@ bool dm_table_request_based(struct dm_table *t)
967int dm_table_alloc_md_mempools(struct dm_table *t) 967int dm_table_alloc_md_mempools(struct dm_table *t)
968{ 968{
969 unsigned type = dm_table_get_type(t); 969 unsigned type = dm_table_get_type(t);
970 unsigned per_bio_data_size = 0;
971 struct dm_target *tgt;
972 unsigned i;
970 973
971 if (unlikely(type == DM_TYPE_NONE)) { 974 if (unlikely(type == DM_TYPE_NONE)) {
972 DMWARN("no table type is set, can't allocate mempools"); 975 DMWARN("no table type is set, can't allocate mempools");
973 return -EINVAL; 976 return -EINVAL;
974 } 977 }
975 978
976 t->mempools = dm_alloc_md_mempools(type, t->integrity_supported); 979 if (type == DM_TYPE_BIO_BASED)
980 for (i = 0; i < t->num_targets; i++) {
981 tgt = t->targets + i;
982 per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
983 }
984
985 t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
977 if (!t->mempools) 986 if (!t->mempools)
978 return -ENOMEM; 987 return -ENOMEM;
979 988
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5401cdce0fc5..2765cf2ba0ff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,18 +63,6 @@ struct dm_io {
63}; 63};
64 64
65/* 65/*
66 * For bio-based dm.
67 * One of these is allocated per target within a bio. Hopefully
68 * this will be simplified out one day.
69 */
70struct dm_target_io {
71 struct dm_io *io;
72 struct dm_target *ti;
73 union map_info info;
74 struct bio clone;
75};
76
77/*
78 * For request-based dm. 66 * For request-based dm.
79 * One of these is allocated per request. 67 * One of these is allocated per request.
80 */ 68 */
@@ -1980,13 +1968,20 @@ static void free_dev(struct mapped_device *md)
1980 1968
1981static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1969static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1982{ 1970{
1983 struct dm_md_mempools *p; 1971 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1984 1972
1985 if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) 1973 if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
1986 /* the md already has necessary mempools */ 1974 /*
1975 * The md already has necessary mempools. Reload just the
1976 * bioset because front_pad may have changed because
1977 * a different table was loaded.
1978 */
1979 bioset_free(md->bs);
1980 md->bs = p->bs;
1981 p->bs = NULL;
1987 goto out; 1982 goto out;
1983 }
1988 1984
1989 p = dm_table_get_md_mempools(t);
1990 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); 1985 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
1991 1986
1992 md->io_pool = p->io_pool; 1987 md->io_pool = p->io_pool;
@@ -2745,7 +2740,7 @@ int dm_noflush_suspending(struct dm_target *ti)
2745} 2740}
2746EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2741EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2747 2742
2748struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) 2743struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
2749{ 2744{
2750 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); 2745 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2751 unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS; 2746 unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
@@ -2753,6 +2748,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2753 if (!pools) 2748 if (!pools)
2754 return NULL; 2749 return NULL;
2755 2750
2751 per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
2752
2756 pools->io_pool = (type == DM_TYPE_BIO_BASED) ? 2753 pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2757 mempool_create_slab_pool(MIN_IOS, _io_cache) : 2754 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2758 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache); 2755 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
@@ -2768,7 +2765,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2768 2765
2769 pools->bs = (type == DM_TYPE_BIO_BASED) ? 2766 pools->bs = (type == DM_TYPE_BIO_BASED) ?
2770 bioset_create(pool_size, 2767 bioset_create(pool_size,
2771 offsetof(struct dm_target_io, clone)) : 2768 per_bio_data_size + offsetof(struct dm_target_io, clone)) :
2772 bioset_create(pool_size, 2769 bioset_create(pool_size,
2773 offsetof(struct dm_rq_clone_bio_info, clone)); 2770 offsetof(struct dm_rq_clone_bio_info, clone));
2774 if (!pools->bs) 2771 if (!pools->bs)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 6a99fefaa743..45b97da1bd06 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -159,7 +159,7 @@ void dm_kcopyd_exit(void);
159/* 159/*
160 * Mempool operations 160 * Mempool operations
161 */ 161 */
162struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity); 162struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
163void dm_free_md_mempools(struct dm_md_mempools *pools); 163void dm_free_md_mempools(struct dm_md_mempools *pools);
164 164
165#endif 165#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index d1f6cd8486f2..6f0e73b4a80d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -210,6 +210,12 @@ struct dm_target {
210 */ 210 */
211 unsigned num_write_same_requests; 211 unsigned num_write_same_requests;
212 212
213 /*
214 * The minimum number of extra bytes allocated in each bio for the
215 * target to use. dm_per_bio_data returns the data location.
216 */
217 unsigned per_bio_data_size;
218
213 /* target specific data */ 219 /* target specific data */
214 void *private; 220 void *private;
215 221
@@ -246,6 +252,30 @@ struct dm_target_callbacks {
246 int (*congested_fn) (struct dm_target_callbacks *, int); 252 int (*congested_fn) (struct dm_target_callbacks *, int);
247}; 253};
248 254
255/*
256 * For bio-based dm.
257 * One of these is allocated for each bio.
258 * This structure shouldn't be touched directly by target drivers.
259 * It is here so that we can inline dm_per_bio_data and
260 * dm_bio_from_per_bio_data
261 */
262struct dm_target_io {
263 struct dm_io *io;
264 struct dm_target *ti;
265 union map_info info;
266 struct bio clone;
267};
268
269static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
270{
271 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
272}
273
274static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
275{
276 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
277}
278
249int dm_register_target(struct target_type *t); 279int dm_register_target(struct target_type *t);
250void dm_unregister_target(struct target_type *t); 280void dm_unregister_target(struct target_type *t);
251 281