author:    Jens Axboe <jens.axboe@oracle.com>  2008-12-11 05:53:43 -0500
committer: Jens Axboe <jens.axboe@oracle.com>  2008-12-29 02:28:46 -0500
commit:    7ff9345ffac56743b5001561bc2dc1e041b79149 (patch)
tree:      aede8c4b4b52c7808cdea7ec039655accffd2298 /fs/bio.c
parent:    a31a97381cdf7dceb03b797a8faf9bc8a01c65d1 (diff)
bio: only mempool back the largest bio_vec slab cache
We only very rarely need the mempool backing, so it makes sense to get
rid of all but one of the mempools in a bio_set. So keep the largest
bio_vec count mempool so we can always honor the largest allocation,
and "upgrade" callers that fail.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
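In outline: try a plain slab allocation for the exact size class first, with flags that make it fail fast and quietly, and only when that fails and the caller is allowed to block, "upgrade" the request to the largest size class and satisfy it from the one remaining mempool. A minimal userspace sketch of that pattern (reserve_alloc() is a hypothetical stand-in for mempool_alloc() on the BIOVEC_MAX_IDX pool, not kernel API):

#include <stdlib.h>
#include <string.h>

#define MAX_VECS 256                      /* analogue of BIO_MAX_PAGES */

/* One pre-sized slot standing in for the mempool-backed reserve. */
static char reserve[MAX_VECS * 16];
static int reserve_busy;

static void *reserve_alloc(void)          /* hypothetical mempool_alloc() */
{
        if (reserve_busy)
                return NULL;              /* a real mempool would sleep */
        reserve_busy = 1;
        memset(reserve, 0, sizeof(reserve));
        return reserve;
}

/* Cheap, fail-fast attempt first; on failure, upgrade to the reserve. */
static void *alloc_vecs(size_t nr, size_t vec_size, int can_block)
{
        void *p = calloc(nr, vec_size);

        if (!p && can_block)
                p = reserve_alloc();
        return p;
}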
Diffstat (limited to 'fs/bio.c')
-rw-r--r--  fs/bio.c | 125

1 file changed, 72 insertions(+), 53 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index df99c882b807..eb6b4683a265 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -58,7 +58,8 @@ unsigned int bvec_nr_vecs(unsigned short idx)
         return bvec_slabs[idx].nr_vecs;
 }
 
-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
+                              struct bio_set *bs)
 {
         struct bio_vec *bvl;
 
@@ -67,60 +68,90 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
          * If not, this is a bio_kmalloc() allocation and just do a
          * kzalloc() for the exact number of vecs right away.
          */
-        if (bs) {
+        if (!bs)
+                bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
+
+        /*
+         * see comment near bvec_array define!
+         */
+        switch (nr) {
+        case 1:
+                *idx = 0;
+                break;
+        case 2 ... 4:
+                *idx = 1;
+                break;
+        case 5 ... 16:
+                *idx = 2;
+                break;
+        case 17 ... 64:
+                *idx = 3;
+                break;
+        case 65 ... 128:
+                *idx = 4;
+                break;
+        case 129 ... BIO_MAX_PAGES:
+                *idx = 5;
+                break;
+        default:
+                return NULL;
+        }
+
+        /*
+         * idx now points to the pool we want to allocate from. only the
+         * 1-vec entry pool is mempool backed.
+         */
+        if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+                bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+        } else {
+                struct biovec_slab *bvs = bvec_slabs + *idx;
+                gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
                 /*
-                 * see comment near bvec_array define!
+                 * Make this allocation restricted and don't dump info on
+                 * allocation failures, since we'll fallback to the mempool
+                 * in case of failure.
                  */
-                switch (nr) {
-                        case 1:
-                                *idx = 0;
-                                break;
-                        case 2 ... 4:
-                                *idx = 1;
-                                break;
-                        case 5 ... 16:
-                                *idx = 2;
-                                break;
-                        case 17 ... 64:
-                                *idx = 3;
-                                break;
-                        case 65 ... 128:
-                                *idx = 4;
-                                break;
-                        case 129 ... BIO_MAX_PAGES:
-                                *idx = 5;
-                                break;
-                        default:
-                                return NULL;
-                }
+                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 
                 /*
-                 * idx now points to the pool we want to allocate from
+                 * Try a slab allocation. If this fails and __GFP_WAIT
+                 * is set, retry with the 1-entry mempool
                  */
-                bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-                if (bvl)
-                        memset(bvl, 0,
-                                bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
-        } else
-                bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
+                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+                if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+                        *idx = BIOVEC_MAX_IDX;
+                        goto fallback;
+                }
+        }
+
+        if (bvl)
+                memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
 
         return bvl;
 }
 
-void bio_free(struct bio *bio, struct bio_set *bio_set)
+void bio_free(struct bio *bio, struct bio_set *bs)
 {
         if (bio->bi_io_vec) {
                 const int pool_idx = BIO_POOL_IDX(bio);
 
                 BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
 
-                mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+                if (pool_idx == BIOVEC_MAX_IDX)
+                        mempool_free(bio->bi_io_vec, bs->bvec_pool);
+                else {
+                        struct biovec_slab *bvs = bvec_slabs + pool_idx;
+
+                        kmem_cache_free(bvs->slab, bio->bi_io_vec);
+                }
         }
 
         if (bio_integrity(bio))
-                bio_integrity_free(bio, bio_set);
+                bio_integrity_free(bio, bs);
 
-        mempool_free(bio, bio_set->bio_pool);
+        mempool_free(bio, bs->bio_pool);
 }
 
 /*
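The free side works because the size class travels with the bio: BIO_POOL_IDX() recovers the index from bio->bi_flags, so bio_free() knows whether the vector came from a slab cache or from the reserve mempool. A sketch of that encode/decode scheme (the 4-bit width and top-of-word shift are illustrative, not the kernel's actual BIO_POOL_OFFSET layout):

#define POOL_IDX_BITS   4
#define POOL_IDX_SHIFT  ((sizeof(unsigned long) * 8) - POOL_IDX_BITS)

/* Stash the size-class index in the top bits of a flags word. */
static unsigned long set_pool_idx(unsigned long flags, unsigned long idx)
{
        return (flags & ((1UL << POOL_IDX_SHIFT) - 1)) | (idx << POOL_IDX_SHIFT);
}

/* Illustrative counterpart of BIO_POOL_IDX(bio). */
static unsigned long get_pool_idx(unsigned long flags)
{
        return flags >> POOL_IDX_SHIFT;
}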
@@ -1346,30 +1377,18 @@ EXPORT_SYMBOL(bio_sector_offset);
  */
 static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
-        int i;
+        struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
 
-        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-                struct biovec_slab *bp = bvec_slabs + i;
-                mempool_t **bvp = bs->bvec_pools + i;
+        bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
+        if (!bs->bvec_pool)
+                return -ENOMEM;
 
-                *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
-                if (!*bvp)
-                        return -ENOMEM;
-        }
         return 0;
 }
 
 static void biovec_free_pools(struct bio_set *bs)
 {
-        int i;
-
-        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-                mempool_t *bvp = bs->bvec_pools[i];
-
-                if (bvp)
-                        mempool_destroy(bvp);
-        }
-
+        mempool_destroy(bs->bvec_pool);
 }
 
 void bioset_free(struct bio_set *bs)
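With a single reserve, pool setup and teardown collapse to one create/destroy pair, as the two hunks above show. An illustrative analogue (vec_reserve_create()/vec_reserve_destroy() are hypothetical names mirroring the simplified biovec_create_pools()/biovec_free_pools(), not kernel API):

#include <stdlib.h>

struct vec_reserve {
        void   *slots;          /* entries * largest-class bytes */
        size_t  nslots;
};

static int vec_reserve_create(struct vec_reserve *r, size_t entries,
                              size_t largest_class_bytes)
{
        r->slots = calloc(entries, largest_class_bytes);
        if (!r->slots)
                return -1;      /* -ENOMEM analogue */
        r->nslots = entries;
        return 0;
}

static void vec_reserve_destroy(struct vec_reserve *r)
{
        free(r->slots);
        r->slots = NULL;
        r->nslots = 0;
}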