author     Mikulas Patocka <mpatocka@redhat.com>      2012-03-28 13:41:29 -0400
committer  Alasdair G Kergon <agk@redhat.com>         2012-03-28 13:41:29 -0400
commit     a66cc28f53a7e9679dedb2bc66ddb0e0c6bdd0ee
tree       0d1037562bd8a47f6e9d31d7ed56e24c7aa98c66   /drivers/md/dm-bufio.c
parent     67e2e2b281812b5caf4923a38aadc6b89e34f064
dm bufio: prefetch
This patch introduces a new function dm_bufio_prefetch. It prefetches
the specified range of blocks into dm-bufio cache without waiting
for i/o completion.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
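For context, dm_bufio_prefetch() is intended to be called before the blocking read path so the subsequent reads find the data already in (or on its way into) the cache. Below is a minimal caller sketch; it is a hypothetical helper, not part of this patch, and it assumes the existing dm_bufio_read()/dm_bufio_release() API from drivers/md/dm-bufio.h.

#include "dm-bufio.h"

/*
 * Hypothetical helper (illustration only): warm the dm-bufio cache for a
 * run of metadata blocks, then read them one at a time.  The prefetch
 * issues the reads without blocking; dm_bufio_read() then waits only for
 * the block that is actually needed next.
 */
static int example_read_run(struct dm_bufio_client *c,
                            sector_t first_block, unsigned count)
{
        unsigned i;

        /* Start asynchronous reads for the whole range; returns immediately. */
        dm_bufio_prefetch(c, first_block, count);

        for (i = 0; i < count; i++) {
                struct dm_buffer *b;
                void *data;

                /* Blocks only if this block's I/O is still in flight. */
                data = dm_bufio_read(c, first_block + i, &b);
                if (IS_ERR(data))
                        return PTR_ERR(data);

                /* ... consume the block's data here ... */

                dm_bufio_release(b);
        }

        return 0;
}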
Diffstat (limited to 'drivers/md/dm-bufio.c')
-rw-r--r--  drivers/md/dm-bufio.c  108
1 file changed, 82 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6e58c7b6df5..cc06a1e52423 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -578,7 +578,7 @@ static void write_endio(struct bio *bio, int error)
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
 	b->write_error = error;
-	if (error) {
+	if (unlikely(error)) {
 		struct dm_bufio_client *c = b->c;
 		(void)cmpxchg(&c->async_write_error, 0, error);
 	}
@@ -697,13 +697,20 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
 	dm_bufio_lock(c);
 }
 
+enum new_flag {
+	NF_FRESH = 0,
+	NF_READ = 1,
+	NF_GET = 2,
+	NF_PREFETCH = 3
+};
+
 /*
  * Allocate a new buffer. If the allocation is not possible, wait until
  * some other thread frees a buffer.
  *
  * May drop the lock and regain it.
  */
-static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c)
+static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 {
 	struct dm_buffer *b;
 
@@ -726,6 +733,9 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 		return b;
 	}
 
+	if (nf == NF_PREFETCH)
+		return NULL;
+
 	if (!list_empty(&c->reserved_buffers)) {
 		b = list_entry(c->reserved_buffers.next,
 			       struct dm_buffer, lru_list);
@@ -743,9 +753,12 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 	}
 }
 
-static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c)
+static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 {
-	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c);
+	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
+
+	if (!b)
+		return NULL;
 
 	if (c->alloc_callback)
 		c->alloc_callback(b);
@@ -865,32 +878,23 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
  * Getting a buffer
  *--------------------------------------------------------------*/
 
-enum new_flag {
-	NF_FRESH = 0,
-	NF_READ = 1,
-	NF_GET = 2
-};
-
 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
-				     enum new_flag nf, struct dm_buffer **bp,
-				     int *need_submit)
+				     enum new_flag nf, int *need_submit)
 {
 	struct dm_buffer *b, *new_b = NULL;
 
 	*need_submit = 0;
 
 	b = __find(c, block);
-	if (b) {
-		b->hold_count++;
-		__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
-			     test_bit(B_WRITING, &b->state));
-		return b;
-	}
+	if (b)
+		goto found_buffer;
 
 	if (nf == NF_GET)
 		return NULL;
 
-	new_b = __alloc_buffer_wait(c);
+	new_b = __alloc_buffer_wait(c, nf);
+	if (!new_b)
+		return NULL;
 
 	/*
 	 * We've had a period where the mutex was unlocked, so need to
@@ -899,10 +903,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 	b = __find(c, block);
 	if (b) {
 		__free_buffer_wake(new_b);
-		b->hold_count++;
-		__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
-			     test_bit(B_WRITING, &b->state));
-		return b;
+		goto found_buffer;
 	}
 
 	__check_watermark(c);
@@ -922,6 +923,24 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 		*need_submit = 1;
 
 	return b;
+
+found_buffer:
+	if (nf == NF_PREFETCH)
+		return NULL;
+	/*
+	 * Note: it is essential that we don't wait for the buffer to be
+	 * read if dm_bufio_get function is used. Both dm_bufio_get and
+	 * dm_bufio_prefetch can be used in the driver request routine.
+	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
+	 * the same buffer, it would deadlock if we waited.
+	 */
+	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+		return NULL;
+
+	b->hold_count++;
+	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
+		     test_bit(B_WRITING, &b->state));
+	return b;
 }
 
 /*
@@ -956,10 +975,10 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	struct dm_buffer *b;
 
 	dm_bufio_lock(c);
-	b = __bufio_new(c, block, nf, bp, &need_submit);
+	b = __bufio_new(c, block, nf, &need_submit);
 	dm_bufio_unlock(c);
 
-	if (!b || IS_ERR(b))
+	if (!b)
 		return b;
 
 	if (need_submit)
@@ -1005,13 +1024,47 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
 }
 EXPORT_SYMBOL_GPL(dm_bufio_new);
 
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+		       sector_t block, unsigned n_blocks)
+{
+	struct blk_plug plug;
+
+	blk_start_plug(&plug);
+	dm_bufio_lock(c);
+
+	for (; n_blocks--; block++) {
+		int need_submit;
+		struct dm_buffer *b;
+		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
+		if (unlikely(b != NULL)) {
+			dm_bufio_unlock(c);
+
+			if (need_submit)
+				submit_io(b, READ, b->block, read_endio);
+			dm_bufio_release(b);
+
+			dm_bufio_cond_resched();
+
+			if (!n_blocks)
+				goto flush_plug;
+			dm_bufio_lock(c);
+		}
+
+	}
+
+	dm_bufio_unlock(c);
+
+flush_plug:
+	blk_finish_plug(&plug);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
+
 void dm_bufio_release(struct dm_buffer *b)
 {
 	struct dm_bufio_client *c = b->c;
 
 	dm_bufio_lock(c);
 
-	BUG_ON(test_bit(B_READING, &b->state));
 	BUG_ON(!b->hold_count);
 
 	b->hold_count--;
@@ -1024,6 +1077,7 @@ void dm_bufio_release(struct dm_buffer *b)
 	 * invalid buffer.
 	 */
 	if ((b->read_error || b->write_error) &&
+	    !test_bit(B_READING, &b->state) &&
 	    !test_bit(B_WRITING, &b->state) &&
 	    !test_bit(B_DIRTY, &b->state)) {
 		__unlink_buffer(b);
@@ -1041,6 +1095,8 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
 
 	dm_bufio_lock(c);
 
+	BUG_ON(test_bit(B_READING, &b->state));
+
 	if (!test_and_set_bit(B_DIRTY, &b->state))
 		__relink_lru(b, LIST_DIRTY);
 