Diffstat (limited to 'fs/bio.c')
-rw-r--r--   fs/bio.c   44
1 file changed, 16 insertions(+), 28 deletions(-)
@@ -25,11 +25,12 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -38,7 +39,7 @@ static kmem_cache_t *bio_slab;
  * basically we just need to survive
  */
 #define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
 
 struct biovec_slab {
 	int nr_vecs;
@@ -635,12 +636,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		goto out;
 
-	memset(pages, 0, nr_pages * sizeof(struct page *));
-
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 		unsigned long len = iov[i].iov_len;
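Note on the hunk above: the kcalloc() conversion folds the separate kmalloc() + memset() pair into one zeroing allocation. As a rough sketch of the semantics only (the helper name below is made up for illustration; this is not the kernel's actual implementation), kcalloc(n, size, flags) behaves approximately like:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative approximation of kcalloc(n, size, flags). */
static void *kcalloc_like(size_t n, size_t size, gfp_t flags)
{
	void *p;

	/* Refuse requests whose total byte count would overflow. */
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;

	p = kmalloc(n * size, flags);
	if (p)
		memset(p, 0, n * size);
	return p;
}

Beyond brevity, the practical gain is the overflow check on the multiplication, which the open-coded kmalloc(nr_pages * sizeof(struct page *), ...) did not have.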
@@ -1095,6 +1094,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+				bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
@@ -1123,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	return bp;
 }
 
-static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
-{
-	return kmalloc(sizeof(struct bio_pair), gfp_flags);
-}
-
-static void bio_pair_free(void *bp, void *data)
-{
-	kfree(bp);
-}
-
 
 /*
  * create memory pools for biovec's in a bio_set.
@@ -1149,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
 		if (i >= scale)
 			pool_entries >>= 1;
 
-		*bvp = mempool_create(pool_entries, mempool_alloc_slab,
-					mempool_free_slab, bp->slab);
+		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
 		if (!*bvp)
 			return -ENOMEM;
 	}
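Note on the hunk above (and on the bioset_create() hunk below): mempool_create_slab_pool() is a convenience wrapper that supplies the stock slab-backed alloc/free callbacks, i.e. exactly the open-coded pattern being removed here. A minimal sketch of the equivalence, reusing the kmem_cache_t typedef this file already uses (the _like wrapper name is hypothetical):

#include <linux/mempool.h>
#include <linux/slab.h>

/*
 * Sketch: what the removed call spelled out by hand, and what
 * mempool_create_slab_pool(min_nr, slab) provides in one step.
 */
static mempool_t *create_slab_pool_like(int min_nr, kmem_cache_t *slab)
{
	return mempool_create(min_nr, mempool_alloc_slab,
			      mempool_free_slab, slab);
}

The element size comes from the kmem_cache itself, so the pool only needs the cache pointer and the number of reserved elements.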
@@ -1182,15 +1173,12 @@ void bioset_free(struct bio_set *bs)
 
 struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
 {
-	struct bio_set *bs = kmalloc(sizeof(*bs), GFP_KERNEL);
+	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 
 	if (!bs)
 		return NULL;
 
-	memset(bs, 0, sizeof(*bs));
-	bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab,
-			mempool_free_slab, bio_slab);
-
+	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
@@ -1243,18 +1231,18 @@ static int __init init_bio(void)
 		scale = 4;
 
 	/*
-	 * scale number of entries
+	 * Limit number of entries reserved -- mempools are only used when
+	 * the system is completely unable to allocate memory, so we only
+	 * need enough to make progress.
 	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
+	bvec_pool_entries = 1 + scale;
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
-				bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
+						sizeof(struct bio_pair));
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
 
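Note on the final hunk: together with the bio_pair_alloc()/bio_pair_free() removal earlier in this patch, the split pool now uses mempool_create_kmalloc_pool(), which backs the reserved elements with plain kmalloc()/kfree() of a fixed object size. A sketch of the equivalence under that assumption (the callback and wrapper names below are hypothetical stand-ins mirroring the removed helpers):

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical stand-ins mirroring the removed bio_pair_alloc()/bio_pair_free(). */
static void *fixed_size_alloc(gfp_t gfp_flags, void *pool_data)
{
	return kmalloc((size_t)pool_data, gfp_flags);
}

static void fixed_size_free(void *element, void *pool_data)
{
	kfree(element);
}

/* Sketch of what mempool_create_kmalloc_pool(min_nr, size) amounts to. */
static mempool_t *create_kmalloc_pool_like(int min_nr, size_t size)
{
	return mempool_create(min_nr, fixed_size_alloc, fixed_size_free,
			      (void *)size);
}

The reworded comment in the same hunk also explains the much smaller bvec reservation: mempools are only drawn on when regular allocation fails completely, so 1 + scale reserved entries are enough to guarantee forward progress.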