author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-30 11:12:39 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-30 11:12:39 -0400
commit     cd9bb7e7367c03400d6e918fd3502820fc3b9084
tree       66eda61f9b28eff39a91b7a819579616161266e3 /fs
parent     24a77daf3d80bddcece044e6dc3675e427eef3f3
parent     07e44708059010aa26c6a4c8ee6ff11743d04d4e
Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block:
  [PATCH] elevator: elv_list_lock does not need irq disabling
  [BLOCK] Don't pin lots of memory in mempools
  cfq-iosched: speedup cic rb lookup
  ll_rw_blk: add io_context private pointer
  cfq-iosched: get rid of cfqq hash
  cfq-iosched: tighten queue request overlap condition
  cfq-iosched: improve sync vs async workloads
  cfq-iosched: never allow an async queue idling
  cfq-iosched: get rid of ->dispatch_slice
  cfq-iosched: don't pass unused preemption variable around
  cfq-iosched: get rid of ->cur_rr and ->cfq_list
  cfq-iosched: slice offset should take ioprio into account
  [PATCH] cfq-iosched: style cleanups and comments
  cfq-iosched: sort IDLE queues into the rbtree
  cfq-iosched: sort RT queues into the rbtree
  [PATCH] cfq-iosched: speed up rbtree handling
  cfq-iosched: rework the whole round-robin list concept
  cfq-iosched: minor updates
  cfq-iosched: development update
  cfq-iosched: improve preemption for cooperating tasks
Diffstat (limited to 'fs')
-rw-r--r--	fs/bio.c	41
1 file changed, 6 insertions(+), 35 deletions(-)
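The fs/bio.c part of this merge is the "[BLOCK] Don't pin lots of memory in mempools" change: BIO_POOL_SIZE drops from 256 to 2 and the memory-size-based scaling of the biovec pools is removed, since mempool reserves are only dipped into when regular allocation fails and therefore only need to be large enough to guarantee forward progress. As a minimal sketch of the pattern the patch relies on (not code from this commit; the "demo" names and object size are made up, and the two-argument ctor/dtor kmem_cache_create() signature matches the 2.6.21-era API used in the diff below):

	#include <linux/init.h>
	#include <linux/mempool.h>
	#include <linux/slab.h>

	static struct kmem_cache *demo_slab;
	static mempool_t *demo_pool;

	static int __init demo_init(void)
	{
		/* Hypothetical cache of 128-byte objects. */
		demo_slab = kmem_cache_create("demo", 128, 0, 0, NULL, NULL);
		if (!demo_slab)
			return -ENOMEM;

		/* Reserve only enough objects to make progress when the
		 * system is out of memory; normal allocations are still
		 * served directly from the slab. */
		demo_pool = mempool_create_slab_pool(2, demo_slab);
		if (!demo_pool) {
			kmem_cache_destroy(demo_slab);
			return -ENOMEM;
		}
		return 0;
	}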
diff --git a/fs/bio.c b/fs/bio.c
index 7618bcb18368..693940da4090 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -28,7 +28,7 @@
 #include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-#define BIO_POOL_SIZE 256
+#define BIO_POOL_SIZE 2
 
 static struct kmem_cache *bio_slab __read_mostly;
 
@@ -38,7 +38,7 @@ static struct kmem_cache *bio_slab __read_mostly;
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
  */
-#define BIO_SPLIT_ENTRIES 8
+#define BIO_SPLIT_ENTRIES 2
 mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
@@ -1120,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
  */
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
 	int i;
 
@@ -1128,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 		struct biovec_slab *bp = bvec_slabs + i;
 		mempool_t **bvp = bs->bvec_pools + i;
 
-		if (pool_entries > 1 && i >= scale)
-			pool_entries >>= 1;
-
 		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
@@ -1161,7 +1158,7 @@ void bioset_free(struct bio_set *bs)
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 {
 	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 
@@ -1172,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+	if (!biovec_create_pools(bs, bvec_pool_size))
 		return bs;
 
 bad:
@@ -1196,38 +1193,12 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	int megabytes, bvec_pool_entries;
-	int scale = BIOVEC_NR_POOLS;
-
 	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
 				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
 	biovec_init_slabs();
 
-	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
-	/*
-	 * find out where to start scaling
-	 */
-	if (megabytes <= 16)
-		scale = 0;
-	else if (megabytes <= 32)
-		scale = 1;
-	else if (megabytes <= 64)
-		scale = 2;
-	else if (megabytes <= 96)
-		scale = 3;
-	else if (megabytes <= 128)
-		scale = 4;
-
-	/*
-	 * Limit number of entries reserved -- mempools are only used when
-	 * the system is completely unable to allocate memory, so we only
-	 * need enough to make progress.
-	 */
-	bvec_pool_entries = 1 + scale;
-
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
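For callers outside this diff, the visible change is bioset_create() losing its third argument. A hedged before/after sketch of a call site (my_driver_init and my_bs are illustrative names, not from this commit):

	/* old: bs = bioset_create(bio_pool_size, bvec_pool_size, scale);
	 * new: bs = bioset_create(bio_pool_size, bvec_pool_size);
	 */
	static struct bio_set *my_bs;

	static int __init my_driver_init(void)
	{
		my_bs = bioset_create(BIO_POOL_SIZE, 2);
		if (!my_bs)
			return -ENOMEM;
		return 0;
	}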