author     Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:20:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:20:05 -0500
commit     1dff81f20cd55ffa5a8ee984da70ce0b99d29606 (patch)
tree       06eb07bda250abfa8a78c3141db56862c8c7cf98 /fs/bio.c
parent     179475a3b46f86e2d06f83e2312218ac3f0cf3a7 (diff)
parent     d3f761104b097738932afcc310fbbbbfb007ef92 (diff)
Merge branch 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block: (43 commits)
bio: get rid of bio_vec clearing
bounce: don't rely on a zeroed bio_vec list
cciss: simplify parameters to deregister_disk function
cfq-iosched: fix race between exiting queue and exiting task
loop: Do not call loop_unplug for not configured loop device.
loop: Flush possible running bios when loop device is released.
alpha: remove dead BIO_VMERGE_BOUNDARY
Get rid of CONFIG_LSF
block: make blk_softirq_init() static
block: use min_not_zero in blk_queue_stack_limits
block: add one-hit cache for disk partition lookup
cfq-iosched: remove limit of dispatch depth of max 4 times quantum
nbd: tell the block layer that it is not a rotational device
block: get rid of elevator_t typedef
aio: make the lookup_ioctx() lockless
bio: add support for inlining a number of bio_vecs inside the bio
bio: allow individual slabs in the bio_set
bio: move the slab pointer inside the bio_set
bio: only mempool back the largest bio_vec slab cache
block: don't use plugging on SSD devices
...
Diffstat (limited to 'fs/bio.c')

-rw-r--r--	fs/bio.c	320
1 file changed, 243 insertions, 77 deletions
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -31,7 +31,11 @@
 
 DEFINE_TRACE(block_split);
 
-static struct kmem_cache *bio_slab __read_mostly;
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS		4
 
 static mempool_t *bio_split_pool __read_mostly;
 
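The define above only pays off because struct bio grows a trailing bi_inline_vecs[] array on the include/linux/bio.h side of this series (not visible in this diff). A user-space sketch of the layout trick, with invented names rather than the kernel's, runs as follows:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Sketch only: small requests use a trailing array baked into the
     * main object, so one allocation replaces two.
     */
    struct vec { void *page; unsigned int len, offset; };

    #define INLINE_VECS 4                    /* mirrors BIO_INLINE_VECS */

    struct io {
        unsigned int max_vecs;
        struct vec *vecs;                    /* inline_vecs or separate alloc */
        struct vec inline_vecs[];            /* storage follows the struct */
    };

    static struct io *io_alloc(unsigned int nr)
    {
        size_t extra = nr <= INLINE_VECS ? INLINE_VECS * sizeof(struct vec) : 0;
        struct io *io = malloc(sizeof(*io) + extra);

        if (!io)
            return NULL;
        if (nr <= INLINE_VECS) {             /* served from the trailing array */
            io->vecs = io->inline_vecs;
            io->max_vecs = INLINE_VECS;
        } else {                             /* big requests still allocate twice */
            io->vecs = malloc(nr * sizeof(struct vec));
            io->max_vecs = nr;
        }
        return io;
    }

    int main(void)
    {
        struct io *io = io_alloc(2);         /* one malloc, four usable vecs */

        printf("max_vecs = %u\n", io ? io->max_vecs : 0);
        free(io);
        return 0;
    }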
@@ -40,9 +44,8 @@ static mempool_t *bio_split_pool __read_mostly;
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -53,12 +56,121 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  */
 struct bio_set *fs_bio_set;
 
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+	struct kmem_cache *slab;
+	unsigned int slab_ref;
+	unsigned int slab_size;
+	char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+	unsigned int sz = sizeof(struct bio) + extra_size;
+	struct kmem_cache *slab = NULL;
+	struct bio_slab *bslab;
+	unsigned int i, entry = -1;
+
+	mutex_lock(&bio_slab_lock);
+
+	i = 0;
+	while (i < bio_slab_nr) {
+		struct bio_slab *bslab = &bio_slabs[i];
+
+		if (!bslab->slab && entry == -1)
+			entry = i;
+		else if (bslab->slab_size == sz) {
+			slab = bslab->slab;
+			bslab->slab_ref++;
+			break;
+		}
+		i++;
+	}
+
+	if (slab)
+		goto out_unlock;
+
+	if (bio_slab_nr == bio_slab_max && entry == -1) {
+		bio_slab_max <<= 1;
+		bio_slabs = krealloc(bio_slabs,
+				     bio_slab_max * sizeof(struct bio_slab),
+				     GFP_KERNEL);
+		if (!bio_slabs)
+			goto out_unlock;
+	}
+	if (entry == -1)
+		entry = bio_slab_nr++;
+
+	bslab = &bio_slabs[entry];
+
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!slab)
+		goto out_unlock;
+
+	printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+	bslab->slab = slab;
+	bslab->slab_ref = 1;
+	bslab->slab_size = sz;
+out_unlock:
+	mutex_unlock(&bio_slab_lock);
+	return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+	struct bio_slab *bslab = NULL;
+	unsigned int i;
+
+	mutex_lock(&bio_slab_lock);
+
+	for (i = 0; i < bio_slab_nr; i++) {
+		if (bs->bio_slab == bio_slabs[i].slab) {
+			bslab = &bio_slabs[i];
+			break;
+		}
+	}
+
+	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+		goto out;
+
+	WARN_ON(!bslab->slab_ref);
+
+	if (--bslab->slab_ref)
+		goto out;
+
+	kmem_cache_destroy(bslab->slab);
+	bslab->slab = NULL;
+
+out:
+	mutex_unlock(&bio_slab_lock);
+}
+
 unsigned int bvec_nr_vecs(unsigned short idx)
 {
 	return bvec_slabs[idx].nr_vecs;
 }
 
-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+	if (idx == BIOVEC_MAX_IDX)
+		mempool_free(bv, bs->bvec_pool);
+	else {
+		struct biovec_slab *bvs = bvec_slabs + idx;
+
+		kmem_cache_free(bvs->slab, bv);
+	}
+}
+
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
+			      struct bio_set *bs)
 {
 	struct bio_vec *bvl;
 
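bio_find_or_create_slab() above lets every bio_set that asks for the same padded size share a single "bio-N" kmem_cache, and bio_put_slab() destroys the cache only when the last reference goes away. The same find-or-create pattern, reduced to a user-space sketch with invented names (locking and slot reuse omitted):

    #include <stdio.h>

    /*
     * Sketch of refcounted cache sharing: one cache per distinct size,
     * shared between all users of that size.
     */
    struct cache {
        unsigned long size;
        unsigned int ref;
        char name[8];
    };

    #define MAX_CACHES 8
    static struct cache caches[MAX_CACHES];
    static unsigned int nr_caches;

    static struct cache *cache_get(unsigned long size)
    {
        unsigned int i;

        for (i = 0; i < nr_caches; i++) {
            if (caches[i].ref && caches[i].size == size) {
                caches[i].ref++;             /* reuse the existing cache */
                return &caches[i];
            }
        }
        if (nr_caches == MAX_CACHES)         /* the kernel krealloc()s here */
            return NULL;
        snprintf(caches[nr_caches].name, sizeof(caches[nr_caches].name),
                 "bio-%u", nr_caches);
        caches[nr_caches].size = size;
        caches[nr_caches].ref = 1;
        return &caches[nr_caches++];
    }

    int main(void)
    {
        struct cache *a = cache_get(256);    /* creates "bio-0", ref 1 */
        struct cache *b = cache_get(256);    /* finds it again,  ref 2 */

        printf("%s ref=%u shared=%d\n", a->name, a->ref, a == b);
        return 0;
    }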
@@ -67,60 +179,85 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
 	 * If not, this is a bio_kmalloc() allocation and just do a
 	 * kzalloc() for the exact number of vecs right away.
 	 */
-	if (bs) {
+	if (!bs)
+		bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
+
+	/*
+	 * see comment near bvec_array define!
+	 */
+	switch (nr) {
+	case 1:
+		*idx = 0;
+		break;
+	case 2 ... 4:
+		*idx = 1;
+		break;
+	case 5 ... 16:
+		*idx = 2;
+		break;
+	case 17 ... 64:
+		*idx = 3;
+		break;
+	case 65 ... 128:
+		*idx = 4;
+		break;
+	case 129 ... BIO_MAX_PAGES:
+		*idx = 5;
+		break;
+	default:
+		return NULL;
+	}
+
+	/*
+	 * idx now points to the pool we want to allocate from. only the
+	 * 1-vec entry pool is mempool backed.
+	 */
+	if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+		bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+	} else {
+		struct biovec_slab *bvs = bvec_slabs + *idx;
+		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
 		/*
-		 * see comment near bvec_array define!
+		 * Make this allocation restricted and don't dump info on
+		 * allocation failures, since we'll fallback to the mempool
+		 * in case of failure.
 		 */
-		switch (nr) {
-		case 1:
-			*idx = 0;
-			break;
-		case 2 ... 4:
-			*idx = 1;
-			break;
-		case 5 ... 16:
-			*idx = 2;
-			break;
-		case 17 ... 64:
-			*idx = 3;
-			break;
-		case 65 ... 128:
-			*idx = 4;
-			break;
-		case 129 ... BIO_MAX_PAGES:
-			*idx = 5;
-			break;
-		default:
-			return NULL;
-		}
+		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 
 		/*
-		 * idx now points to the pool we want to allocate from
+		 * Try a slab allocation. If this fails and __GFP_WAIT
+		 * is set, retry with the 1-entry mempool
 		 */
-		bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-		if (bvl)
-			memset(bvl, 0,
-				bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
-	} else
-		bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
+		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+			*idx = BIOVEC_MAX_IDX;
+			goto fallback;
+		}
+	}
 
 	return bvl;
 }
 
-void bio_free(struct bio *bio, struct bio_set *bio_set)
+void bio_free(struct bio *bio, struct bio_set *bs)
 {
-	if (bio->bi_io_vec) {
-		const int pool_idx = BIO_POOL_IDX(bio);
+	void *p;
 
-		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
-		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
-	}
+	if (bio_has_allocated_vec(bio))
+		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 	if (bio_integrity(bio))
-		bio_integrity_free(bio, bio_set);
+		bio_integrity_free(bio, bs);
+
+	/*
+	 * If we have front padding, adjust the bio pointer before freeing
+	 */
+	p = bio;
+	if (bs->front_pad)
+		p -= bs->front_pad;
 
-	mempool_free(bio, bio_set->bio_pool);
+	mempool_free(p, bs->bio_pool);
 }
 
 /*
@@ -133,7 +270,8 @@ static void bio_fs_destructor(struct bio *bio)
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
-	kfree(bio->bi_io_vec);
+	if (bio_has_allocated_vec(bio))
+		kfree(bio->bi_io_vec);
 	kfree(bio);
 }
 
@@ -157,16 +295,20 @@ void bio_init(struct bio *bio)
  * for a &struct bio to become free. If a %NULL @bs is passed in, we will
  * fall back to just using @kmalloc to allocate the required memory.
  *
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure, or @kmalloc if none given.
+ * Note that the caller must set ->bi_destructor on succesful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
 **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-	struct bio *bio;
+	struct bio *bio = NULL;
+
+	if (bs) {
+		void *p = mempool_alloc(bs->bio_pool, gfp_mask);
 
-	if (bs)
-		bio = mempool_alloc(bs->bio_pool, gfp_mask);
-	else
+		if (p)
+			bio = p + bs->front_pad;
+	} else
 		bio = kmalloc(sizeof(*bio), gfp_mask);
 
 	if (likely(bio)) {
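The front-pad arithmetic is symmetric: bio_alloc_bioset() above returns the mempool buffer advanced by front_pad, and bio_free() earlier steps back by the same amount before returning it to the pool. A user-space sketch of the pair, with invented names:

    #include <stdlib.h>

    static void *pad_alloc(size_t obj_size, size_t front_pad)
    {
        char *p = malloc(front_pad + obj_size);

        return p ? p + front_pad : NULL;    /* what bio_alloc_bioset() returns */
    }

    static void pad_free(void *obj, size_t front_pad)
    {
        if (obj)
            free((char *)obj - front_pad);  /* what bio_free() undoes */
    }

    int main(void)
    {
        void *obj = pad_alloc(64, 32);      /* 32 caller-owned bytes in front */

        pad_free(obj, 32);
        return 0;
    }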
@@ -176,7 +318,15 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		if (likely(nr_iovecs)) {
 			unsigned long uninitialized_var(idx);
 
-			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+			if (nr_iovecs <= BIO_INLINE_VECS) {
+				idx = 0;
+				bvl = bio->bi_inline_vecs;
+				nr_iovecs = BIO_INLINE_VECS;
+			} else {
+				bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
+							bs);
+				nr_iovecs = bvec_nr_vecs(idx);
+			}
 			if (unlikely(!bvl)) {
 				if (bs)
 					mempool_free(bio, bs->bio_pool);
@@ -186,7 +336,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 			goto out;
 		}
 		bio->bi_flags |= idx << BIO_POOL_OFFSET;
-		bio->bi_max_vecs = bvec_nr_vecs(idx);
+		bio->bi_max_vecs = nr_iovecs;
 	}
 	bio->bi_io_vec = bvl;
 }
@@ -1346,30 +1496,18 @@ EXPORT_SYMBOL(bio_sector_offset);
  */
 static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
-	int i;
+	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
 
-	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-		struct biovec_slab *bp = bvec_slabs + i;
-		mempool_t **bvp = bs->bvec_pools + i;
+	bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
+	if (!bs->bvec_pool)
+		return -ENOMEM;
 
-		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
-		if (!*bvp)
-			return -ENOMEM;
-	}
 	return 0;
 }
 
 static void biovec_free_pools(struct bio_set *bs)
 {
-	int i;
-
-	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-		mempool_t *bvp = bs->bvec_pools[i];
-
-		if (bvp)
-			mempool_destroy(bvp);
-	}
-
+	mempool_destroy(bs->bvec_pool);
 }
 
 void bioset_free(struct bio_set *bs)
@@ -1379,25 +1517,49 @@ void bioset_free(struct bio_set *bs)
 
 	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
+	bio_put_slab(bs);
 
 	kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size:	Number of bio and bio_vecs to cache in the mempool
+ * @front_pad:	Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
-	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+	struct bio_set *bs;
 
+	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 	if (!bs)
 		return NULL;
 
-	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
+	bs->front_pad = front_pad;
+
+	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+	if (!bs->bio_slab) {
+		kfree(bs);
+		return NULL;
+	}
+
+	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (bioset_integrity_create(bs, bio_pool_size))
+	if (bioset_integrity_create(bs, pool_size))
 		goto bad;
 
-	if (!biovec_create_pools(bs, bvec_pool_size))
+	if (!biovec_create_pools(bs, pool_size))
 		return bs;
 
 bad:
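As the new kerneldoc says, the point of @front_pad is to let a caller embed the bio at the end of a larger structure and get both in one allocation. An illustrative, hypothetical driver-side use (not part of this commit; "struct my_request", my_bio_set and my_init are invented names):

    #include <linux/bio.h>
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/stddef.h>

    struct my_request {
        int my_cookie;                  /* lives in the front pad */
        struct bio bio;                 /* must be the last member */
    };

    static struct bio_set *my_bio_set;

    static int __init my_init(void)
    {
        my_bio_set = bioset_create(BIO_POOL_SIZE,
                                   offsetof(struct my_request, bio));
        return my_bio_set ? 0 : -ENOMEM;
    }

    static struct my_request *my_request_from_bio(struct bio *bio)
    {
        /* bio_alloc_bioset() returned p + front_pad, so step back */
        return container_of(bio, struct my_request, bio);
    }

Because bio_find_or_create_slab() keys caches by total size, every bioset created with the same front_pad transparently shares one "bio-N" slab.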
@@ -1421,12 +1583,16 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	bio_slab_max = 2;
+	bio_slab_nr = 0;
+	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+	if (!bio_slabs)
+		panic("bio: can't allocate bios\n");
 
 	bio_integrity_init_slab();
 	biovec_init_slabs();
 
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
 