author    Mikulas Patocka <mpatocka@redhat.com>  2013-07-10 18:41:18 -0400
committer Alasdair G Kergon <agk@redhat.com>     2013-07-10 18:41:18 -0400
commit    2480945cd44b50ba8b1646544eec2db21f064f12
tree      e9e3d5b75c4b1bdd83eaf7727016f607310c6574
parent    43aeaa29573924df76f44eda2bbd94ca36e407b5
dm bufio: submit writes outside lock
This patch changes dm-bufio so that it submits write I/Os outside of the
lock. If the number of submitted buffers is greater than the number of
requests on the target queue, submit_bio() blocks. We want to block outside
of the lock to improve latency of other threads that may need the lock.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
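[Editor's note] The core of the change is a defer-and-flush pattern: code
that holds the client mutex no longer calls submit_io() directly (which can
block inside submit_bio() when the device queue is full); it queues the
buffer on a caller-provided write_list instead, and the caller issues the
queued writes with __flush_write_list() once the mutex is dropped. Below is
a minimal user-space sketch of that pattern, assuming a pthread mutex in
place of the dm-bufio lock; the names work_item, queue_dirty, slow_submit
and flush_dirty are illustrative only, not kernel APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int id;
	struct work_item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *dirty;		/* protected by "lock" */

/* Stand-in for submit_io()/submit_bio(): may block for a long time. */
static void slow_submit(struct work_item *w)
{
	printf("submitting item %d\n", w->id);
	free(w);
}

static void queue_dirty(int id)
{
	struct work_item *w = malloc(sizeof(*w));

	if (!w)
		return;
	w->id = id;
	pthread_mutex_lock(&lock);
	w->next = dirty;
	dirty = w;
	pthread_mutex_unlock(&lock);
}

static void flush_dirty(void)
{
	struct work_item *pending, *w;

	/* Short critical section: just move the work to a private list. */
	pthread_mutex_lock(&lock);
	pending = dirty;
	dirty = NULL;
	pthread_mutex_unlock(&lock);

	/*
	 * The potentially blocking submissions run with the lock released,
	 * so other threads that need "lock" are not stalled behind the I/O.
	 */
	while ((w = pending) != NULL) {
		pending = w->next;
		slow_submit(w);
	}
}

int main(void)
{
	queue_dirty(1);
	queue_dirty(2);
	flush_dirty();
	return 0;
}

In the patch itself, __flush_write_list() additionally brackets the
submissions with blk_start_plug()/blk_finish_plug() so the deferred writes
can be merged before they reach the device.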
-rw-r--r--	drivers/md/dm-bufio.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 58 insertions(+), 15 deletions(-)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9588b864d311..5227e079a6e3 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -145,6 +145,7 @@ struct dm_buffer {
 	unsigned long state;
 	unsigned long last_accessed;
 	struct dm_bufio_client *c;
+	struct list_head write_list;
 	struct bio bio;
 	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
 };
@@ -630,7 +631,8 @@ static int do_io_schedule(void *word)
  * - Submit our write and don't wait on it. We set B_WRITING indicating
  *   that there is a write in progress.
  */
-static void __write_dirty_buffer(struct dm_buffer *b)
+static void __write_dirty_buffer(struct dm_buffer *b,
+				 struct list_head *write_list)
 {
 	if (!test_bit(B_DIRTY, &b->state))
 		return;
@@ -639,7 +641,24 @@ static void __write_dirty_buffer(struct dm_buffer *b)
 	wait_on_bit_lock(&b->state, B_WRITING,
 			 do_io_schedule, TASK_UNINTERRUPTIBLE);
 
-	submit_io(b, WRITE, b->block, write_endio);
+	if (!write_list)
+		submit_io(b, WRITE, b->block, write_endio);
+	else
+		list_add_tail(&b->write_list, write_list);
+}
+
+static void __flush_write_list(struct list_head *write_list)
+{
+	struct blk_plug plug;
+	blk_start_plug(&plug);
+	while (!list_empty(write_list)) {
+		struct dm_buffer *b =
+			list_entry(write_list->next, struct dm_buffer, write_list);
+		list_del(&b->write_list);
+		submit_io(b, WRITE, b->block, write_endio);
+		dm_bufio_cond_resched();
+	}
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -655,7 +674,7 @@ static void __make_buffer_clean(struct dm_buffer *b)
 		return;
 
 	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
-	__write_dirty_buffer(b);
+	__write_dirty_buffer(b, NULL);
 	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
 }
 
@@ -802,7 +821,8 @@ static void __free_buffer_wake(struct dm_buffer *b)
 		wake_up(&c->free_buffer_wait);
 }
 
-static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
+static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
+					struct list_head *write_list)
 {
 	struct dm_buffer *b, *tmp;
 
@@ -818,7 +838,7 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
 		if (no_wait && test_bit(B_WRITING, &b->state))
 			return;
 
-		__write_dirty_buffer(b);
+		__write_dirty_buffer(b, write_list);
 		dm_bufio_cond_resched();
 	}
 }
@@ -853,7 +873,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
  * If we are over threshold_buffers, start freeing buffers.
  * If we're over "limit_buffers", block until we get under the limit.
  */
-static void __check_watermark(struct dm_bufio_client *c)
+static void __check_watermark(struct dm_bufio_client *c,
+			      struct list_head *write_list)
 {
 	unsigned long threshold_buffers, limit_buffers;
 
@@ -872,7 +893,7 @@ static void __check_watermark(struct dm_bufio_client *c)
 	}
 
 	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
-		__write_dirty_buffers_async(c, 1);
+		__write_dirty_buffers_async(c, 1, write_list);
 }
 
 /*
@@ -897,7 +918,8 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
  *--------------------------------------------------------------*/
 
 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
-				     enum new_flag nf, int *need_submit)
+				     enum new_flag nf, int *need_submit,
+				     struct list_head *write_list)
 {
 	struct dm_buffer *b, *new_b = NULL;
 
@@ -924,7 +946,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 			goto found_buffer;
 	}
 
-	__check_watermark(c);
+	__check_watermark(c, write_list);
 
 	b = new_b;
 	b->hold_count = 1;
@@ -992,10 +1014,14 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	int need_submit;
 	struct dm_buffer *b;
 
+	LIST_HEAD(write_list);
+
 	dm_bufio_lock(c);
-	b = __bufio_new(c, block, nf, &need_submit);
+	b = __bufio_new(c, block, nf, &need_submit, &write_list);
 	dm_bufio_unlock(c);
 
+	__flush_write_list(&write_list);
+
 	if (!b)
 		return b;
 
@@ -1047,6 +1073,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 {
 	struct blk_plug plug;
 
+	LIST_HEAD(write_list);
+
 	BUG_ON(dm_bufio_in_request());
 
 	blk_start_plug(&plug);
@@ -1055,7 +1083,15 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 	for (; n_blocks--; block++) {
 		int need_submit;
 		struct dm_buffer *b;
-		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
+		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
+				&write_list);
+		if (unlikely(!list_empty(&write_list))) {
+			dm_bufio_unlock(c);
+			blk_finish_plug(&plug);
+			__flush_write_list(&write_list);
+			blk_start_plug(&plug);
+			dm_bufio_lock(c);
+		}
 		if (unlikely(b != NULL)) {
 			dm_bufio_unlock(c);
 
@@ -1069,7 +1105,6 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 				goto flush_plug;
 			dm_bufio_lock(c);
 		}
-
 	}
 
 	dm_bufio_unlock(c);
@@ -1126,11 +1161,14 @@ EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
 
 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
 {
+	LIST_HEAD(write_list);
+
 	BUG_ON(dm_bufio_in_request());
 
 	dm_bufio_lock(c);
-	__write_dirty_buffers_async(c, 0);
+	__write_dirty_buffers_async(c, 0, &write_list);
 	dm_bufio_unlock(c);
+	__flush_write_list(&write_list);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
 
@@ -1147,8 +1185,13 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;
 
+	LIST_HEAD(write_list);
+
+	dm_bufio_lock(c);
+	__write_dirty_buffers_async(c, 0, &write_list);
+	dm_bufio_unlock(c);
+	__flush_write_list(&write_list);
 	dm_bufio_lock(c);
-	__write_dirty_buffers_async(c, 0);
 
 again:
 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
@@ -1274,7 +1317,7 @@ retry:
 	BUG_ON(!b->hold_count);
 	BUG_ON(test_bit(B_READING, &b->state));
 
-	__write_dirty_buffer(b);
+	__write_dirty_buffer(b, NULL);
 	if (b->hold_count == 1) {
 		wait_on_bit(&b->state, B_WRITING,
 			    do_io_schedule, TASK_UNINTERRUPTIBLE);