author		Jens Axboe <jens.axboe@oracle.com>	2008-12-10 09:35:05 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2008-12-29 02:29:23 -0500
commit		bb799ca0202a360fa74d5f17039b9100caebdde7 (patch)
tree		048b6cedfd2644edd82a606db6d9e8b19d31328b /fs
parent		1b4344986926da324b5cd10b683e5a1a5e1b7db3 (diff)
bio: allow individual slabs in the bio_set
Instead of having a global bio slab cache, add a reference to one in each
bio_set that is created. This allows for personalized slabs in each bio_set,
so that they can have bios of different sizes.

This means we can personalize the bios we return. File systems may want to
embed the bio inside another structure, to avoid allocating more items (and
stuffing them in ->bi_private) after they get a bio. Or we may want to embed
a number of bio_vecs directly at the end of a bio, to avoid doing two
allocations to return a bio. This is now possible.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
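To make the embedding use case concrete, here is a minimal sketch (not part of this patch) of how a file system might use the new front padding. The names my_request, my_bio_set, my_bio_destructor and the pool size are hypothetical; the bio sits at the end of the container, as the new bioset_create() documentation requires:

/* Hypothetical usage sketch, assuming this patch is applied. */
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/stddef.h>

struct my_request {			/* hypothetical container */
	void		*private_data;	/* replaces stuffing ->bi_private */
	struct bio	bio;		/* must be the LAST member */
};

static struct bio_set *my_bio_set;	/* hypothetical bio_set */

static void my_bio_destructor(struct bio *bio)
{
	/* bio_free() now rewinds by bs->front_pad before mempool_free() */
	bio_free(bio, my_bio_set);
}

static int __init my_init(void)
{
	/* cache 4 bios, each preceded by our front pad bytes */
	my_bio_set = bioset_create(4, offsetof(struct my_request, bio));
	return my_bio_set ? 0 : -ENOMEM;
}

static struct my_request *my_alloc_request(gfp_t gfp)
{
	struct bio *bio = bio_alloc_bioset(gfp, 1, my_bio_set);

	if (!bio)
		return NULL;
	bio->bi_destructor = my_bio_destructor;	/* required, see the doc change below */
	return container_of(bio, struct my_request, bio);
}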
Diffstat (limited to 'fs')
-rw-r--r--	fs/bio-integrity.c	|   2
-rw-r--r--	fs/bio.c		| 191
2 files changed, 162 insertions(+), 31 deletions(-)
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 19caf7c962ac..77ebc3c263d6 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
 	    && bip->bip_buf != NULL)
 		kfree(bip->bip_buf);
 
-	mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
+	bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
 	mempool_free(bip, bs->bio_integrity_pool);
 
 	bio->bi_integrity = NULL;
diff --git a/fs/bio.c b/fs/bio.c
index 1ab8986b0411..0146f80789e9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -31,8 +31,6 @@
 
 DEFINE_TRACE(block_split);
 
-static struct kmem_cache *bio_slab __read_mostly;
-
 static mempool_t *bio_split_pool __read_mostly;
 
 /*
@@ -40,9 +38,8 @@ static mempool_t *bio_split_pool __read_mostly;
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -53,11 +50,119 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  */
 struct bio_set *fs_bio_set;
 
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+	struct kmem_cache *slab;
+	unsigned int slab_ref;
+	unsigned int slab_size;
+	char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+	unsigned int sz = sizeof(struct bio) + extra_size;
+	struct kmem_cache *slab = NULL;
+	struct bio_slab *bslab;
+	unsigned int i, entry = -1;
+
+	mutex_lock(&bio_slab_lock);
+
+	i = 0;
+	while (i < bio_slab_nr) {
+		struct bio_slab *bslab = &bio_slabs[i];
+
+		if (!bslab->slab && entry == -1)
+			entry = i;
+		else if (bslab->slab_size == sz) {
+			slab = bslab->slab;
+			bslab->slab_ref++;
+			break;
+		}
+		i++;
+	}
+
+	if (slab)
+		goto out_unlock;
+
+	if (bio_slab_nr == bio_slab_max && entry == -1) {
+		bio_slab_max <<= 1;
+		bio_slabs = krealloc(bio_slabs,
+				     bio_slab_max * sizeof(struct bio_slab),
+				     GFP_KERNEL);
+		if (!bio_slabs)
+			goto out_unlock;
+	}
+	if (entry == -1)
+		entry = bio_slab_nr++;
+
+	bslab = &bio_slabs[entry];
+
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!slab)
+		goto out_unlock;
+
+	printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+	bslab->slab = slab;
+	bslab->slab_ref = 1;
+	bslab->slab_size = sz;
+out_unlock:
+	mutex_unlock(&bio_slab_lock);
+	return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+	struct bio_slab *bslab = NULL;
+	unsigned int i;
+
+	mutex_lock(&bio_slab_lock);
+
+	for (i = 0; i < bio_slab_nr; i++) {
+		if (bs->bio_slab == bio_slabs[i].slab) {
+			bslab = &bio_slabs[i];
+			break;
+		}
+	}
+
+	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+		goto out;
+
+	WARN_ON(!bslab->slab_ref);
+
+	if (--bslab->slab_ref)
+		goto out;
+
+	kmem_cache_destroy(bslab->slab);
+	bslab->slab = NULL;
+
+out:
+	mutex_unlock(&bio_slab_lock);
+}
+
 unsigned int bvec_nr_vecs(unsigned short idx)
 {
 	return bvec_slabs[idx].nr_vecs;
 }
 
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+	if (idx == BIOVEC_MAX_IDX)
+		mempool_free(bv, bs->bvec_pool);
+	else {
+		struct biovec_slab *bvs = bvec_slabs + idx;
+
+		kmem_cache_free(bvs->slab, bv);
+	}
+}
+
 struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
 			      struct bio_set *bs)
 {
@@ -134,24 +239,22 @@ fallback:
 
 void bio_free(struct bio *bio, struct bio_set *bs)
 {
-	if (bio->bi_io_vec) {
-		const int pool_idx = BIO_POOL_IDX(bio);
+	void *p;
 
-		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
-		if (pool_idx == BIOVEC_MAX_IDX)
-			mempool_free(bio->bi_io_vec, bs->bvec_pool);
-		else {
-			struct biovec_slab *bvs = bvec_slabs + pool_idx;
-
-			kmem_cache_free(bvs->slab, bio->bi_io_vec);
-		}
-	}
+	if (bio->bi_io_vec)
+		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 	if (bio_integrity(bio))
 		bio_integrity_free(bio, bs);
 
-	mempool_free(bio, bs->bio_pool);
+	/*
+	 * If we have front padding, adjust the bio pointer before freeing
+	 */
+	p = bio;
+	if (bs->front_pad)
+		p -= bs->front_pad;
+
+	mempool_free(p, bs->bio_pool);
 }
 
 /*
@@ -188,16 +291,20 @@ void bio_init(struct bio *bio)
  * for a &struct bio to become free. If a %NULL @bs is passed in, we will
  * fall back to just using @kmalloc to allocate the required memory.
  *
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure, or @kmalloc if none given.
+ * Note that the caller must set ->bi_destructor on successful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-	struct bio *bio;
+	struct bio *bio = NULL;
+
+	if (bs) {
+		void *p = mempool_alloc(bs->bio_pool, gfp_mask);
 
-	if (bs)
-		bio = mempool_alloc(bs->bio_pool, gfp_mask);
-	else
+		if (p)
+			bio = p + bs->front_pad;
+	} else
 		bio = kmalloc(sizeof(*bio), gfp_mask);
 
 	if (likely(bio)) {
@@ -1398,11 +1505,25 @@ void bioset_free(struct bio_set *bs)
 
 	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
+	bio_put_slab(bs);
 
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size:	Number of bio and bio_vecs to cache in the mempool
+ * @front_pad:	Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
 	struct bio_set *bs;
 
@@ -1410,16 +1531,22 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 	if (!bs)
 		return NULL;
 
-	bs->bio_slab = bio_slab;
+	bs->front_pad = front_pad;
 
-	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bs->bio_slab);
+	bs->bio_slab = bio_find_or_create_slab(front_pad);
+	if (!bs->bio_slab) {
+		kfree(bs);
+		return NULL;
+	}
+
+	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (bioset_integrity_create(bs, bio_pool_size))
+	if (bioset_integrity_create(bs, pool_size))
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size))
+	if (!biovec_create_pools(bs, pool_size))
 		return bs;
 
 bad:
@@ -1443,12 +1570,16 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	bio_slab_max = 2;
+	bio_slab_nr = 0;
+	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+	if (!bio_slabs)
+		panic("bio: can't allocate bios\n");
 
 	bio_integrity_init_slab();
 	biovec_init_slabs();
 
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
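As a closing note on the slab management above, a hedged sketch of the sharing behaviour: two bio_sets created with the same front_pad share one "bio-N" kmem_cache, since bio_find_or_create_slab() bumps slab_ref for the second caller and bio_put_slab() destroys the cache only when the last user is freed. The function name below is hypothetical and error handling is omitted:

/* Hypothetical sketch of per-size slab sharing between bio_sets. */
static void slab_sharing_demo(void)
{
	struct bio_set *a = bioset_create(16, 64);	/* may create a new "bio-N" slab */
	struct bio_set *b = bioset_create(16, 64);	/* same size: reuses it, slab_ref == 2 */

	bioset_free(b);		/* slab_ref drops back to 1 */
	bioset_free(a);		/* last reference: the kmem_cache is destroyed */
}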