author	Mikulas Patocka <mpatocka@redhat.com>	2017-01-04 14:23:52 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2017-03-07 13:28:33 -0500
commit	400a0befc96240f7bb2a53b9622deffd55d385fe (patch)
tree	e58983fef536dfc8e03d7d1c0da16d8ad1e768f4
parent	9b4b5a797cf8a8d904df979891a8de53f2cb9694 (diff)
dm bufio: add sector start offset to dm-bufio interface
Introduce the dm_bufio_set_sector_offset() interface to allow setting a
sector offset for a dm-bufio client. This is a prerequisite for the DM
integrity target.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
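
[Editor's note: a minimal usage sketch, illustration only and not part of
this patch. It assumes a hypothetical target whose data area begins
SB_SECTORS sectors into the device (SB_SECTORS is a made-up constant for a
reserved superblock area). Per the header comment added below, the offset
must be set before any I/O is issued on the client:

	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* bufio block 0 now starts SB_SECTORS sectors into the device */
	dm_bufio_set_sector_offset(c, SB_SECTORS);
]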
-rw-r--r--	drivers/md/dm-bufio.c	51
-rw-r--r--	drivers/md/dm-bufio.h	7
2 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index df4859f6ac6a..280b56c229d6 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -110,6 +110,8 @@ struct dm_bufio_client {
 	struct rb_root buffer_tree;
 	wait_queue_head_t free_buffer_wait;
 
+	sector_t start;
+
 	int async_write_error;
 
 	struct list_head client_list;
@@ -557,8 +559,8 @@ static void dmio_complete(unsigned long error, void *context)
 	b->bio.bi_end_io(&b->bio);
 }
 
-static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
-		     bio_end_io_t *end_io)
+static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+		     unsigned n_sectors, bio_end_io_t *end_io)
 {
 	int r;
 	struct dm_io_request io_req = {
@@ -570,8 +572,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
 	};
 	struct dm_io_region region = {
 		.bdev = b->c->bdev,
-		.sector = block << b->c->sectors_per_block_bits,
-		.count = b->c->block_size >> SECTOR_SHIFT,
+		.sector = sector,
+		.count = n_sectors,
 	};
 
 	if (b->data_mode != DATA_MODE_VMALLOC) {
@@ -606,14 +608,14 @@ static void inline_endio(struct bio *bio)
 	end_fn(bio);
 }
 
-static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
-			   bio_end_io_t *end_io)
+static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
+			   unsigned n_sectors, bio_end_io_t *end_io)
 {
 	char *ptr;
 	int len;
 
 	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
-	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
+	b->bio.bi_iter.bi_sector = sector;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = inline_endio;
 	/*
@@ -628,7 +630,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
 	 */
 	ptr = b->data;
-	len = b->c->block_size;
+	len = n_sectors << SECTOR_SHIFT;
 
 	if (len >= PAGE_SIZE)
 		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
@@ -640,7 +642,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 			  len < PAGE_SIZE ? len : PAGE_SIZE,
 			  offset_in_page(ptr))) {
 		BUG_ON(b->c->block_size <= PAGE_SIZE);
-		use_dmio(b, rw, block, end_io);
+		use_dmio(b, rw, sector, n_sectors, end_io);
 		return;
 	}
 
@@ -651,17 +653,22 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	submit_bio(&b->bio);
 }
 
-static void submit_io(struct dm_buffer *b, int rw, sector_t block,
-		      bio_end_io_t *end_io)
+static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
 {
+	unsigned n_sectors;
+	sector_t sector;
+
 	if (rw == WRITE && b->c->write_callback)
 		b->c->write_callback(b);
 
-	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
+	sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
+	n_sectors = 1 << b->c->sectors_per_block_bits;
+
+	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
 	    b->data_mode != DATA_MODE_VMALLOC)
-		use_inline_bio(b, rw, block, end_io);
+		use_inline_bio(b, rw, sector, n_sectors, end_io);
 	else
-		use_dmio(b, rw, block, end_io);
+		use_dmio(b, rw, sector, n_sectors, end_io);
 }
 
 /*----------------------------------------------------------------
@@ -713,7 +720,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
 		wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
 	if (!write_list)
-		submit_io(b, WRITE, b->block, write_endio);
+		submit_io(b, WRITE, write_endio);
 	else
 		list_add_tail(&b->write_list, write_list);
 }
@@ -726,7 +733,7 @@ static void __flush_write_list(struct list_head *write_list)
 		struct dm_buffer *b =
 			list_entry(write_list->next, struct dm_buffer, write_list);
 		list_del(&b->write_list);
-		submit_io(b, WRITE, b->block, write_endio);
+		submit_io(b, WRITE, write_endio);
 		cond_resched();
 	}
 	blk_finish_plug(&plug);
@@ -1094,7 +1101,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 		return NULL;
 
 	if (need_submit)
-		submit_io(b, READ, b->block, read_endio);
+		submit_io(b, READ, read_endio);
 
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
@@ -1164,7 +1171,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 			dm_bufio_unlock(c);
 
 			if (need_submit)
-				submit_io(b, READ, b->block, read_endio);
+				submit_io(b, READ, read_endio);
 			dm_bufio_release(b);
 
 			cond_resched();
@@ -1405,7 +1412,7 @@ retry:
 		old_block = b->block;
 		__unlink_buffer(b);
 		__link_buffer(b, new_block, b->list_mode);
-		submit_io(b, WRITE, new_block, write_endio);
+		submit_io(b, WRITE, write_endio);
 		wait_on_bit_io(&b->state, B_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		__unlink_buffer(b);
@@ -1762,6 +1769,12 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
 
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+{
+	c->start = start;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
+
 static unsigned get_max_age_hz(void)
 {
 	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
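
[Editor's note: with the offset in place, submit_io() above maps a buffer to
device sectors as sector = (block << sectors_per_block_bits) + start and
issues 1 << sectors_per_block_bits sectors per buffer. A standalone worked
example of that arithmetic, illustration only with made-up values:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* The block-to-sector mapping submit_io() performs after this patch. */
	static sector_t block_to_sector(sector_t block,
					unsigned sectors_per_block_bits,
					sector_t start)
	{
		return (block << sectors_per_block_bits) + start;
	}

	int main(void)
	{
		/* 4096-byte blocks = 8 sectors, so sectors_per_block_bits == 3;
		 * a start offset of 8 shifts every buffer 8 sectors down. */
		sector_t s = block_to_sector(5, 3, 8);
		printf("block 5 -> sector %llu, 8 sectors\n", s); /* sector 48 */
		return 0;
	}
]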
diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h
index c096779a7292..b6d8f53ec15b 100644
--- a/drivers/md/dm-bufio.h
+++ b/drivers/md/dm-bufio.h
@@ -32,6 +32,13 @@ dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
 void dm_bufio_client_destroy(struct dm_bufio_client *c);
 
 /*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
+
+/*
  * WARNING: to avoid deadlocks, these conditions are observed:
  *
  * - At most one thread can hold at most "reserved_buffers" simultaneously.