Diffstat (limited to 'drivers/md/dm-io.c')
 drivers/md/dm-io.c | 77 ++++++++++++++++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 42 insertions(+), 35 deletions(-)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index db404a0f7e2c..c09359db3a90 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -33,7 +33,6 @@ struct dm_io_client {
 struct io {
	unsigned long error_bits;
	atomic_t count;
-	struct completion *wait;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
@@ -112,28 +111,27 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
  * We need an io object to keep track of the number of bios that
  * have been dispatched for a particular io.
  *---------------------------------------------------------------*/
-static void dec_count(struct io *io, unsigned int region, int error)
+static void complete_io(struct io *io)
 {
-	if (error)
-		set_bit(region, &io->error_bits);
+	unsigned long error_bits = io->error_bits;
+	io_notify_fn fn = io->callback;
+	void *context = io->context;
 
-	if (atomic_dec_and_test(&io->count)) {
-		if (io->vma_invalidate_size)
-			invalidate_kernel_vmap_range(io->vma_invalidate_address,
-						     io->vma_invalidate_size);
+	if (io->vma_invalidate_size)
+		invalidate_kernel_vmap_range(io->vma_invalidate_address,
+					     io->vma_invalidate_size);
 
-		if (io->wait)
-			complete(io->wait);
+	mempool_free(io, io->client->pool);
+	fn(error_bits, context);
+}
 
-		else {
-			unsigned long r = io->error_bits;
-			io_notify_fn fn = io->callback;
-			void *context = io->context;
+static void dec_count(struct io *io, unsigned int region, int error)
+{
+	if (error)
+		set_bit(region, &io->error_bits);
 
-			mempool_free(io, io->client->pool);
-			fn(r, context);
-		}
-	}
+	if (atomic_dec_and_test(&io->count))
+		complete_io(io);
 }
 
 static void endio(struct bio *bio, int error)
@@ -376,41 +374,51 @@ static void dispatch_io(int rw, unsigned int num_regions,
	dec_count(io, 0, 0);
 }
 
+struct sync_io {
+	unsigned long error_bits;
+	struct completion wait;
+};
+
+static void sync_io_complete(unsigned long error, void *context)
+{
+	struct sync_io *sio = context;
+
+	sio->error_bits = error;
+	complete(&sio->wait);
+}
+
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
 {
-	/*
-	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
-	 * align it on our own.
-	 * volatile prevents the optimizer from removing or reusing
-	 * "io_" field from the stack frame (allowed in ANSI C).
-	 */
-	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
-	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
-	DECLARE_COMPLETION_ONSTACK(wait);
+	struct io *io;
+	struct sync_io sio;
 
	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}
 
+	init_completion(&sio.wait);
+
+	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
-	io->wait = &wait;
	io->client = client;
+	io->callback = sync_io_complete;
+	io->context = &sio;
 
	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;
 
	dispatch_io(rw, num_regions, where, dp, io, 1);
 
-	wait_for_completion_io(&wait);
+	wait_for_completion_io(&sio.wait);
 
	if (error_bits)
-		*error_bits = io->error_bits;
+		*error_bits = sio.error_bits;
 
-	return io->error_bits ? -EIO : 0;
+	return sio.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
@@ -428,7 +436,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
-	io->wait = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;
@@ -481,9 +488,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
- * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
+ * If you fail to do one of these, the IO will be submitted to the disk after
+ * q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
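
For a caller's-eye view of the refactored synchronous path, here is a minimal sketch. It is not part of the patch: example_read_page, the device, and the 4 KiB geometry are hypothetical, and it assumes the dm-io interface of this kernel generation (struct dm_io_request, struct dm_io_region, DM_IO_KMEM from include/linux/dm-io.h). Leaving notify.fn NULL still makes dm_io() block; what changes with this patch is that the io object comes from the client's mempool and the wait goes through sync_io_complete(), rather than through a completion pointer embedded in struct io.

#include <linux/dm-io.h>

/*
 * Hypothetical caller: synchronous read of one 4 KiB region.  With
 * notify.fn left NULL, dm_io() takes the sync_io() path in the patch
 * above and simply blocks until error_bits is final.
 */
static int example_read_page(struct dm_io_client *client,
			     struct block_device *bdev, void *buf)
{
	unsigned long error_bits = 0;
	struct dm_io_region region = {
		.bdev   = bdev,
		.sector = 0,
		.count  = 8,		/* 8 x 512-byte sectors = 4 KiB */
	};
	struct dm_io_request req = {
		.bi_rw        = READ,
		.mem.type     = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.notify.fn    = NULL,	/* NULL => synchronous */
		.client       = client,
	};

	/* Returns -EIO if any region's bit is set in error_bits. */
	return dm_io(&req, 1, &region, &error_bits);
}

The design point is that sync_io() is now just the asynchronous path plus a completion, which is what allows the volatile on-stack io object and the gcc <= 4.3 stack-alignment workaround to be deleted.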
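
For the asynchronous case, the comment block in the last hunk applies: supply notify.fn and either unplug the queue yourself later or set REQ_SYNC in io_req->bi_rw. A sketch along the same lines, with the same caveats (example_done and its completion context are hypothetical):

/*
 * Hypothetical async caller, per the dm_io() comment above: since
 * nothing here will unplug the queue, REQ_SYNC is set in bi_rw.
 */
static void example_done(unsigned long error_bits, void *context)
{
	/* Runs from the io completion path; must not sleep. */
	complete((struct completion *)context);
}

static int example_read_page_async(struct dm_io_client *client,
				   struct block_device *bdev, void *buf,
				   struct completion *done)
{
	struct dm_io_region region = {
		.bdev   = bdev,
		.sector = 0,
		.count  = 8,
	};
	struct dm_io_request req = {
		.bi_rw          = READ | REQ_SYNC,
		.mem.type       = DM_IO_KMEM,
		.mem.ptr.addr   = buf,
		.notify.fn      = example_done,
		.notify.context = done,
		.client         = client,
	};

	/* Error bits are delivered to example_done(), not returned here. */
	return dm_io(&req, 1, &region, NULL);
}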
