author     Jens Axboe <jens.axboe@oracle.com>        2007-04-02 04:06:42 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2007-04-30 03:08:17 -0400
commit     5972511b77809cb7c9ccdb79b825c54921c5c546
tree       8dec5821badf9750be04f339f0f621fab2114c8a  /fs/bio.c
parent     b9099ff63c75216d6ca10bce5a1abcd9293c27e6
[BLOCK] Don't pin lots of memory in mempools
Currently we scale the mempool sizes depending on the memory installed
in the machine, except for the bio pool itself, which sits at a fixed
256-entry pre-allocation.

There's really no point in "optimizing" this OOM path; we just need
enough preallocated entries to make progress. A single unit would be
enough, but let's scale it down to 2 just to be on the safe side.

This patch saves ~150 KB of pinned kernel memory on a 32-bit box.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
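
For readers less familiar with the mempool API, the point of the change is that a mempool's `min_nr` argument is purely an emergency reserve: `mempool_alloc()` dips into it only when the underlying allocator fails, so a reserve of 2 already guarantees forward progress. The following is a minimal sketch of that contract, using the same `kmem_cache_create()` and `mempool_create_slab_pool()` calls seen in the diff below; the `demo_*` names are invented for illustration and are not part of the patch.

```c
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical illustration only -- not part of the patch. */
static struct kmem_cache *demo_slab;
static mempool_t *demo_pool;

static int __init demo_init(void)
{
	/* Six-argument kmem_cache_create(), as in kernels of this era. */
	demo_slab = kmem_cache_create("demo", 128, 0, 0, NULL, NULL);
	if (!demo_slab)
		return -ENOMEM;

	/*
	 * Only two elements are pre-allocated and pinned.  mempool_alloc()
	 * falls back to this reserve solely when the slab allocator cannot
	 * satisfy the request, which is all the OOM path needs in order
	 * to make progress.
	 */
	demo_pool = mempool_create_slab_pool(2, demo_slab);
	if (!demo_pool) {
		kmem_cache_destroy(demo_slab);
		return -ENOMEM;
	}

	return 0;
}
```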
Diffstat (limited to 'fs/bio.c')

-rw-r--r--  fs/bio.c | 41
1 file changed, 6 insertions, 35 deletions
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -28,7 +28,7 @@
 #include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-#define BIO_POOL_SIZE 256
+#define BIO_POOL_SIZE 2
 
 static struct kmem_cache *bio_slab __read_mostly;
 
@@ -38,7 +38,7 @@ static struct kmem_cache *bio_slab __read_mostly;
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
  */
-#define BIO_SPLIT_ENTRIES 8
+#define BIO_SPLIT_ENTRIES 2
 mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
@@ -1120,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
  */
-static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
+static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
 	int i;
 
@@ -1128,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 		struct biovec_slab *bp = bvec_slabs + i;
 		mempool_t **bvp = bs->bvec_pools + i;
 
-		if (pool_entries > 1 && i >= scale)
-			pool_entries >>= 1;
-
 		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
@@ -1161,7 +1158,7 @@ void bioset_free(struct bio_set *bs)
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
+struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 {
 	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 
@@ -1172,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size, scale))
+	if (!biovec_create_pools(bs, bvec_pool_size))
 		return bs;
 
 bad:
@@ -1196,38 +1193,12 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	int megabytes, bvec_pool_entries;
-	int scale = BIOVEC_NR_POOLS;
-
 	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
 				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 
 	biovec_init_slabs();
 
-	megabytes = nr_free_pages() >> (20 - PAGE_SHIFT);
-
-	/*
-	 * find out where to start scaling
-	 */
-	if (megabytes <= 16)
-		scale = 0;
-	else if (megabytes <= 32)
-		scale = 1;
-	else if (megabytes <= 64)
-		scale = 2;
-	else if (megabytes <= 96)
-		scale = 3;
-	else if (megabytes <= 128)
-		scale = 4;
-
-	/*
-	 * Limit number of entries reserved -- mempools are only used when
-	 * the system is completely unable to allocate memory, so we only
-	 * need enough to make progress.
-	 */
-	bvec_pool_entries = 1 + scale;
-
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
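
With the `scale` parameter gone, callers that maintain a private bio_set pass just the two pool sizes. The sketch below shows what such a caller might look like under the new `bioset_create()`/`bioset_free()` signatures from this patch; the `my_*` names are hypothetical and not taken from the kernel tree.

```c
#include <linux/errno.h>
#include <linux/bio.h>

/* Hypothetical private bio_set; names are illustrative only. */
static struct bio_set *my_bio_set;

static int my_setup(void)
{
	/*
	 * New two-argument form: a couple of reserved bios and biovec
	 * entries is enough, since the pools are only dipped into when
	 * regular allocation fails.
	 */
	my_bio_set = bioset_create(2, 2);
	if (!my_bio_set)
		return -ENOMEM;

	return 0;
}

static void my_teardown(void)
{
	bioset_free(my_bio_set);
}
```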