-rw-r--r--  drivers/md/dm-crypt.c  |   2
-rw-r--r--  drivers/md/dm-io.c     |   2
-rw-r--r--  drivers/md/dm.c        |   2
-rw-r--r--  fs/bio-integrity.c     |   2
-rw-r--r--  fs/bio.c               | 191
-rw-r--r--  include/linux/bio.h    |   6
6 files changed, 170 insertions, 35 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ce26c84af064..3326750ec02c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_page_pool;
 	}
 
-	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
+	cc->bs = bioset_create(MIN_IOS, 0);
 	if (!cc->bs) {
 		ti->error = "Cannot allocate crypt bioset";
 		goto bad_bs;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2fd6d4450637..a34338567a2a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(16, 16);
+	client->bios = bioset_create(16, 0);
 	if (!client->bios)
 		goto bad;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 343094c3feeb..421c9f02d8ca 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->tio_pool)
 		goto bad_tio_pool;
 
-	md->bs = bioset_create(16, 16);
+	md->bs = bioset_create(16, 0);
 	if (!md->bs)
 		goto bad_no_bioset;
 
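
The three device-mapper call sites above now pass 0 for the new second argument because they do not embed their bios in a larger structure. A caller that did want front padding would size it from its own per-I/O structure. A minimal sketch, assuming a hypothetical struct my_io and bioset my_bs that are not part of this patch:

#include <linux/bio.h>
#include <linux/stddef.h>

/* Hypothetical per-I/O structure; with front padding the bio must always
 * be the last member, as the bioset_create() kernel-doc below requires.
 */
struct my_io {
	void *private_data;	/* driver-private state lives in the pad */
	struct bio bio;		/* embedded bio handed out by the bioset */
};

static struct bio_set *my_bs;

static int my_init(void)
{
	/* front_pad = number of bytes placed in front of each returned bio */
	my_bs = bioset_create(16, offsetof(struct my_io, bio));
	if (!my_bs)
		return -ENOMEM;
	return 0;
}
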
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 19caf7c962ac..77ebc3c263d6 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
 	    && bip->bip_buf != NULL)
 		kfree(bip->bip_buf);
 
-	mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
+	bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
 	mempool_free(bip, bs->bio_integrity_pool);
 
 	bio->bi_integrity = NULL;
diff --git a/fs/bio.c b/fs/bio.c
index 1ab8986b0411..0146f80789e9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -31,8 +31,6 @@
 
 DEFINE_TRACE(block_split);
 
-static struct kmem_cache *bio_slab __read_mostly;
-
 static mempool_t *bio_split_pool __read_mostly;
 
 /*
@@ -40,9 +38,8 @@ static mempool_t *bio_split_pool __read_mostly;
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -53,11 +50,119 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  */
 struct bio_set *fs_bio_set;
 
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+	struct kmem_cache *slab;
+	unsigned int slab_ref;
+	unsigned int slab_size;
+	char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+	unsigned int sz = sizeof(struct bio) + extra_size;
+	struct kmem_cache *slab = NULL;
+	struct bio_slab *bslab;
+	unsigned int i, entry = -1;
+
+	mutex_lock(&bio_slab_lock);
+
+	i = 0;
+	while (i < bio_slab_nr) {
+		struct bio_slab *bslab = &bio_slabs[i];
+
+		if (!bslab->slab && entry == -1)
+			entry = i;
+		else if (bslab->slab_size == sz) {
+			slab = bslab->slab;
+			bslab->slab_ref++;
+			break;
+		}
+		i++;
+	}
+
+	if (slab)
+		goto out_unlock;
+
+	if (bio_slab_nr == bio_slab_max && entry == -1) {
+		bio_slab_max <<= 1;
+		bio_slabs = krealloc(bio_slabs,
+				     bio_slab_max * sizeof(struct bio_slab),
+				     GFP_KERNEL);
+		if (!bio_slabs)
+			goto out_unlock;
+	}
+	if (entry == -1)
+		entry = bio_slab_nr++;
+
+	bslab = &bio_slabs[entry];
+
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!slab)
+		goto out_unlock;
+
+	printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+	bslab->slab = slab;
+	bslab->slab_ref = 1;
+	bslab->slab_size = sz;
+out_unlock:
+	mutex_unlock(&bio_slab_lock);
+	return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+	struct bio_slab *bslab = NULL;
+	unsigned int i;
+
+	mutex_lock(&bio_slab_lock);
+
+	for (i = 0; i < bio_slab_nr; i++) {
+		if (bs->bio_slab == bio_slabs[i].slab) {
+			bslab = &bio_slabs[i];
+			break;
+		}
+	}
+
+	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+		goto out;
+
+	WARN_ON(!bslab->slab_ref);
+
+	if (--bslab->slab_ref)
+		goto out;
+
+	kmem_cache_destroy(bslab->slab);
+	bslab->slab = NULL;
+
+out:
+	mutex_unlock(&bio_slab_lock);
+}
+
 unsigned int bvec_nr_vecs(unsigned short idx)
 {
 	return bvec_slabs[idx].nr_vecs;
 }
 
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+	if (idx == BIOVEC_MAX_IDX)
+		mempool_free(bv, bs->bvec_pool);
+	else {
+		struct biovec_slab *bvs = bvec_slabs + idx;
+
+		kmem_cache_free(bvs->slab, bv);
+	}
+}
+
 struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
 			      struct bio_set *bs)
 {
@@ -134,24 +239,22 @@ fallback:
 
 void bio_free(struct bio *bio, struct bio_set *bs)
 {
-	if (bio->bi_io_vec) {
-		const int pool_idx = BIO_POOL_IDX(bio);
+	void *p;
 
-		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
-		if (pool_idx == BIOVEC_MAX_IDX)
-			mempool_free(bio->bi_io_vec, bs->bvec_pool);
-		else {
-			struct biovec_slab *bvs = bvec_slabs + pool_idx;
-
-			kmem_cache_free(bvs->slab, bio->bi_io_vec);
-		}
-	}
+	if (bio->bi_io_vec)
+		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 	if (bio_integrity(bio))
 		bio_integrity_free(bio, bs);
 
-	mempool_free(bio, bs->bio_pool);
+	/*
+	 * If we have front padding, adjust the bio pointer before freeing
+	 */
+	p = bio;
+	if (bs->front_pad)
+		p -= bs->front_pad;
+
+	mempool_free(p, bs->bio_pool);
 }
 
 /*
@@ -188,16 +291,20 @@ void bio_init(struct bio *bio)
  * for a &struct bio to become free. If a %NULL @bs is passed in, we will
  * fall back to just using @kmalloc to allocate the required memory.
  *
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure, or @kmalloc if none given.
+ * Note that the caller must set ->bi_destructor on succesful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-	struct bio *bio;
+	struct bio *bio = NULL;
+
+	if (bs) {
+		void *p = mempool_alloc(bs->bio_pool, gfp_mask);
 
-	if (bs)
-		bio = mempool_alloc(bs->bio_pool, gfp_mask);
-	else
+		if (p)
+			bio = p + bs->front_pad;
+	} else
 		bio = kmalloc(sizeof(*bio), gfp_mask);
 
 	if (likely(bio)) {
@@ -1398,11 +1505,25 @@ void bioset_free(struct bio_set *bs)
 
 	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
+	bio_put_slab(bs);
 
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create - Create a bio_set
+ * @pool_size: Number of bio and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
 	struct bio_set *bs;
 
@@ -1410,16 +1531,22 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 	if (!bs)
 		return NULL;
 
-	bs->bio_slab = bio_slab;
+	bs->front_pad = front_pad;
 
-	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bs->bio_slab);
+	bs->bio_slab = bio_find_or_create_slab(front_pad);
+	if (!bs->bio_slab) {
+		kfree(bs);
+		return NULL;
+	}
+
+	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (bioset_integrity_create(bs, bio_pool_size))
+	if (bioset_integrity_create(bs, pool_size))
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size))
+	if (!biovec_create_pools(bs, pool_size))
 		return bs;
 
 bad:
@@ -1443,12 +1570,16 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	bio_slab_max = 2;
+	bio_slab_nr = 0;
+	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+	if (!bio_slabs)
+		panic("bio: can't allocate bios\n");
 
 	bio_integrity_init_slab();
 	biovec_init_slabs();
 
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
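
The new bioset_create() kernel-doc and the ->bi_destructor note above describe the intended usage: bio_alloc_bioset() returns a bio that sits front_pad bytes into the slab object, and bio_free() subtracts that padding again before handing the object back to the mempool. A hedged sketch of the allocation and destruction side, continuing the hypothetical struct my_io and my_bs names from the earlier sketch (none of this is part of the patch):

static void my_bio_destructor(struct bio *bio)
{
	struct my_io *io = container_of(bio, struct my_io, bio);

	/* release any driver-private state carried in the front pad */
	io->private_data = NULL;

	/* bio_free() subtracts bs->front_pad before returning the slab
	 * object to the mempool, so pair it with the bioset the bio
	 * actually came from.
	 */
	bio_free(bio, my_bs);
}

static struct bio *my_alloc_bio(gfp_t gfp)
{
	struct bio *bio = bio_alloc_bioset(gfp, 4, my_bs);

	if (bio)
		bio->bi_destructor = my_bio_destructor;	/* caller must set this */
	return bio;
}
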
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 9340098d75dc..4b80d3537f97 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -334,7 +334,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
@@ -379,6 +379,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
@@ -401,6 +402,8 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
 
 struct bio_set {
 	struct kmem_cache *bio_slab;
+	unsigned int front_pad;
+
 	mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	mempool_t *bio_integrity_pool;
@@ -415,6 +418,7 @@ struct biovec_slab {
 };
 
 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 /*
  * a small number of entries is fine, not going to be performance critical.
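
The new front_pad field added to struct bio_set above is also what keys the slab lookup: bio_find_or_create_slab() matches on sizeof(struct bio) + front_pad, so biosets that request the same padding share one refcounted "bio-<N>" kmem_cache, and bio_put_slab() only destroys it when the last such bioset is freed. An illustrative sketch of that lifetime, with hypothetical names and sizes:

#include <linux/bio.h>

/* Illustrative only: two biosets that ask for the same front_pad end up on
 * the same "bio-<N>" cache; the cache is refcounted and destroyed only when
 * the last bioset using it goes away.
 */
static void slab_sharing_demo(void)
{
	struct bio_set *a, *b;

	a = bioset_create(16, 64);	/* creates (or reuses) the slab for sizeof(struct bio) + 64 */
	b = bioset_create(32, 64);	/* same object size, same slab: slab_ref becomes 2 */

	if (a)
		bioset_free(a);		/* slab_ref drops to 1, the cache survives */
	if (b)
		bioset_free(b);		/* slab_ref drops to 0, kmem_cache_destroy() runs */
}
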