author    | Heinz Mauelshagen <hjm@redhat.com>                   | 2007-05-09 05:33:00 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:47 -0400
commit    | 891ce207011d3d9219f79fd5114c8594bbacc653 (patch)
tree      | 969ca658f41ec8a9f719a44a487ae582ee278c27 /drivers/md
parent    | c897feb3dcf3c3300849056ee82b01df7bf66d3c (diff)
dm io: prepare for new interface
Introduce struct dm_io_client to prepare for per-client mempools and bio_sets.
Temporary functions bios() and io_pool() choose between the per-client
structures and the global ones so the old and new interfaces can co-exist.
Make error_bits optional.
Signed-off-by: Heinz Mauelshagen <hjm@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Milan Broz <mbroz@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
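For readers following the co-existence scheme, the key idiom is that struct io now carries a client pointer which is NULL for callers of the old interface, and the temporary helpers fall back to the global pools in that case. Below is a minimal, self-contained userspace sketch of that selection idiom only; the struct pool type and the names global_io_pool/per_client_pool are simplified stand-ins invented for illustration, not the kernel's mempool_t or struct bio_set.

#include <stdio.h>

/* Simplified stand-in for the kernel's mempool_t / struct bio_set. */
struct pool {
	const char *name;
};

static struct pool global_io_pool = { "global _io_pool" };

struct dm_io_client {
	struct pool *pool;	/* per-client pool, as introduced by the patch */
};

/*
 * Mirrors the temporary io_pool() helper in the diff below: a NULL client
 * means the caller came through the old interface, so fall back to the
 * global pool.
 */
static struct pool *io_pool(struct dm_io_client *client)
{
	return client ? client->pool : &global_io_pool;
}

int main(void)
{
	struct pool per_client_pool = { "per-client pool" };
	struct dm_io_client client = { &per_client_pool };

	printf("old interface allocates from: %s\n", io_pool(NULL)->name);
	printf("new interface allocates from: %s\n", io_pool(&client)->name);
	return 0;
}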
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-io.c | 61
1 file changed, 44 insertions, 17 deletions
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4d19c45158b4..66db79208c1d 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2006 Red Hat GmbH
  *
  * This file is released under the GPL.
  */
@@ -14,11 +15,17 @@
 
 static struct bio_set *_bios;
 
+struct dm_io_client {
+	mempool_t *pool;
+	struct bio_set *bios;
+};
+
 /* FIXME: can we shrink this ? */
 struct io {
 	unsigned long error;
 	atomic_t count;
 	struct task_struct *sleeper;
+	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
 };
@@ -26,12 +33,24 @@ struct io {
 /*
  * io contexts are only dynamically allocated for asynchronous
  * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as buffer heads ! (FIXME:
- * must reduce this).
+ * have the same number of io contexts as bios! (FIXME: must reduce this).
  */
 static unsigned _num_ios;
 static mempool_t *_io_pool;
 
+/*
+ * Temporary functions to allow old and new interfaces to co-exist.
+ */
+static struct bio_set *bios(struct dm_io_client *client)
+{
+	return client ? client->bios : _bios;
+}
+
+static mempool_t *io_pool(struct dm_io_client *client)
+{
+	return client ? client->pool : _io_pool;
+}
+
 static unsigned int pages_to_ios(unsigned int pages)
 {
 	return 4 * pages;	/* too many ? */
@@ -118,7 +137,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
 			io_notify_fn fn = io->callback;
 			void *context = io->context;
 
-			mempool_free(io, _io_pool);
+			mempool_free(io, io_pool(io->client));
 			fn(r, context);
 		}
 	}
@@ -241,7 +260,9 @@ static void vm_dp_init(struct dpages *dp, void *data)
 
 static void dm_bio_destructor(struct bio *bio)
 {
-	bio_free(bio, _bios);
+	struct io *io = bio->bi_private;
+
+	bio_free(bio, bios(io->client));
 }
 
 /*-----------------------------------------------------------------
@@ -264,7 +285,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 		 * to hide it from bio_add_page().
 		 */
 		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
-		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
+		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, bios(io->client));
 		bio->bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
@@ -319,8 +340,9 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	dec_count(io, 0, 0);
 }
 
-static int sync_io(unsigned int num_regions, struct io_region *where,
-		   int rw, struct dpages *dp, unsigned long *error_bits)
+static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+		   struct io_region *where, int rw, struct dpages *dp,
+		   unsigned long *error_bits)
 {
 	struct io io;
 
@@ -332,6 +354,7 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
 	io.error = 0;
 	atomic_set(&io.count, 1); /* see dispatch_io() */
 	io.sleeper = current;
+	io.client = client;
 
 	dispatch_io(rw, num_regions, where, dp, &io, 1);
 
@@ -348,12 +371,15 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
 	if (atomic_read(&io.count))
 		return -EINTR;
 
-	*error_bits = io.error;
+	if (error_bits)
+		*error_bits = io.error;
+
 	return io.error ? -EIO : 0;
 }
 
-static int async_io(unsigned int num_regions, struct io_region *where, int rw,
-		    struct dpages *dp, io_notify_fn fn, void *context)
+static int async_io(struct dm_io_client *client, unsigned int num_regions,
+		    struct io_region *where, int rw, struct dpages *dp,
+		    io_notify_fn fn, void *context)
 {
 	struct io *io;
 
@@ -363,10 +389,11 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
 		return -EIO;
 	}
 
-	io = mempool_alloc(_io_pool, GFP_NOIO);
+	io = mempool_alloc(io_pool(client), GFP_NOIO);
 	io->error = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
+	io->client = client;
 	io->callback = fn;
 	io->context = context;
 
@@ -380,7 +407,7 @@ int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
 {
 	struct dpages dp;
 	list_dp_init(&dp, pl, offset);
-	return sync_io(num_regions, where, rw, &dp, error_bits);
+	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
 }
 
 int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
@@ -388,7 +415,7 @@ int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
 {
 	struct dpages dp;
 	bvec_dp_init(&dp, bvec);
-	return sync_io(num_regions, where, rw, &dp, error_bits);
+	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
 }
 
 int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
@@ -396,7 +423,7 @@ int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
 {
 	struct dpages dp;
 	vm_dp_init(&dp, data);
-	return sync_io(num_regions, where, rw, &dp, error_bits);
+	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
 }
 
 int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
@@ -405,7 +432,7 @@ int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
 {
 	struct dpages dp;
 	list_dp_init(&dp, pl, offset);
-	return async_io(num_regions, where, rw, &dp, fn, context);
+	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
 }
 
 int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
@@ -413,7 +440,7 @@ int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
 {
 	struct dpages dp;
 	bvec_dp_init(&dp, bvec);
-	return async_io(num_regions, where, rw, &dp, fn, context);
+	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
 }
 
 int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
@@ -421,7 +448,7 @@ int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
 {
 	struct dpages dp;
 	vm_dp_init(&dp, data);
-	return async_io(num_regions, where, rw, &dp, fn, context);
+	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
 }
 
 EXPORT_SYMBOL(dm_io_get);
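The diff above also makes error_bits optional: sync_io() only writes the error bitmap back when the caller supplied a pointer. A small standalone sketch of that result-handling contract follows; sync_io_result() and the EIO define are simplified stand-ins made up for illustration, not the kernel function itself.

#include <stdio.h>

#define EIO 5	/* stand-in for the kernel's errno value */

/*
 * Simplified model of sync_io()'s result handling after this patch:
 * the error bitmap is only written back when the caller supplied
 * somewhere to put it.
 */
static int sync_io_result(unsigned long io_error, unsigned long *error_bits)
{
	if (error_bits)
		*error_bits = io_error;

	return io_error ? -EIO : 0;
}

int main(void)
{
	unsigned long bits = 0;

	/* Caller that wants the per-region error bits. */
	printf("rc=%d bits=0x%lx\n", sync_io_result(0x2, &bits), bits);

	/* Caller that only cares about success/failure may now pass NULL. */
	printf("rc=%d\n", sync_io_result(0, NULL));
	return 0;
}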