 drivers/md/bcache/btree.h   |   2 +-
 drivers/md/bcache/request.c | 368 ++++++++++++++++++++++++------------------------
 2 files changed, 183 insertions(+), 187 deletions(-)
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 967aacd20625..ea0814b51574 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,7 +260,7 @@ struct btree_op {
 	} type:8;
 
 	unsigned		csum:1;
-	unsigned		skip:1;
+	unsigned		bypass:1;
 	unsigned		flush_journal:1;
 
 	unsigned		insert_data_done:1;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a000e918b795..dbc2ef6e7a35 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,8 +25,6 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void check_should_skip(struct cached_dev *, struct search *);
-
 /* Cgroup interface */
 
 #ifdef CONFIG_CGROUP_BCACHE
@@ -480,7 +478,7 @@ static void bch_insert_data_loop(struct closure *cl)
 	struct search *s = container_of(op, struct search, op);
 	struct bio *bio = op->cache_bio, *n;
 
-	if (op->skip)
+	if (op->bypass)
 		return bio_invalidate(cl);
 
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
@@ -557,7 +555,7 @@ err:
 		 * we wait for buckets to be freed up, so just invalidate the
 		 * rest of the write.
 		 */
-		op->skip = true;
+		op->bypass = true;
 		return bio_invalidate(cl);
 	} else {
 		/*
@@ -590,8 +588,8 @@ err:
  * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
  * and op->inode is used for the key inode.
  *
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If op->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by op->cache_bio and op->inode.
  */
 void bch_insert_data(struct closure *cl)
 {
@@ -717,7 +715,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 	s->orig_bio = bio;
 	s->write = (bio->bi_rw & REQ_WRITE) != 0;
 	s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
-	s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
 	s->recoverable = 1;
 	s->start_time = jiffies;
 	do_bio_hook(s);
@@ -757,6 +754,134 @@ static void cached_dev_bio_complete(struct closure *cl)
 	cached_dev_put(dc);
 }
 
+unsigned bch_get_congested(struct cache_set *c)
+{
+	int i;
+	long rand;
+
+	if (!c->congested_read_threshold_us &&
+	    !c->congested_write_threshold_us)
+		return 0;
+
+	i = (local_clock_us() - c->congested_last_us) / 1024;
+	if (i < 0)
+		return 0;
+
+	i += atomic_read(&c->congested);
+	if (i >= 0)
+		return 0;
+
+	i += CONGESTED_MAX;
+
+	if (i > 0)
+		i = fract_exp_two(i, 6);
+
+	rand = get_random_int();
+	i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+	return i > 0 ? i : 1;
+}
+
+static void add_sequential(struct task_struct *t)
+{
+	ewma_add(t->sequential_io_avg,
+		 t->sequential_io, 8, 0);
+
+	t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
+{
+	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
+
+static bool check_should_bypass(struct cached_dev *dc, struct search *s)
+{
+	struct cache_set *c = s->op.c;
+	struct bio *bio = &s->bio.bio;
+	unsigned mode = cache_mode(dc, bio);
+	unsigned sectors, congested = bch_get_congested(c);
+
+	if (atomic_read(&dc->disk.detaching) ||
+	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+	    (bio->bi_rw & REQ_DISCARD))
+		goto skip;
+
+	if (mode == CACHE_MODE_NONE ||
+	    (mode == CACHE_MODE_WRITEAROUND &&
+	     (bio->bi_rw & REQ_WRITE)))
+		goto skip;
+
+	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	    bio_sectors(bio) & (c->sb.block_size - 1)) {
+		pr_debug("skipping unaligned io");
+		goto skip;
+	}
+
+	if (!congested && !dc->sequential_cutoff)
+		goto rescale;
+
+	if (!congested &&
+	    mode == CACHE_MODE_WRITEBACK &&
+	    (bio->bi_rw & REQ_WRITE) &&
+	    (bio->bi_rw & REQ_SYNC))
+		goto rescale;
+
+	if (dc->sequential_merge) {
+		struct io *i;
+
+		spin_lock(&dc->io_lock);
+
+		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+			if (i->last == bio->bi_sector &&
+			    time_before(jiffies, i->jiffies))
+				goto found;
+
+		i = list_first_entry(&dc->io_lru, struct io, lru);
+
+		add_sequential(s->task);
+		i->sequential = 0;
+found:
+		if (i->sequential + bio->bi_size > i->sequential)
+			i->sequential += bio->bi_size;
+
+		i->last = bio_end_sector(bio);
+		i->jiffies = jiffies + msecs_to_jiffies(5000);
+		s->task->sequential_io = i->sequential;
+
+		hlist_del(&i->hash);
+		hlist_add_head(&i->hash, iohash(dc, i->last));
+		list_move_tail(&i->lru, &dc->io_lru);
+
+		spin_unlock(&dc->io_lock);
+	} else {
+		s->task->sequential_io = bio->bi_size;
+
+		add_sequential(s->task);
+	}
+
+	sectors = max(s->task->sequential_io,
+		      s->task->sequential_io_avg) >> 9;
+
+	if (dc->sequential_cutoff &&
+	    sectors >= dc->sequential_cutoff >> 9) {
+		trace_bcache_bypass_sequential(s->orig_bio);
+		goto skip;
+	}
+
+	if (congested && sectors >= congested) {
+		trace_bcache_bypass_congested(s->orig_bio);
+		goto skip;
+	}
+
+rescale:
+	bch_rescale_priorities(c, bio_sectors(bio));
+	return false;
+skip:
+	bch_mark_sectors_bypassed(s, bio_sectors(bio));
+	return true;
+}
+
 /* Process reads */
 
 static void cached_dev_read_complete(struct closure *cl)
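The heart of the new check_should_bypass() above is the per-task sequential IO accounting: a bio that starts where a recent one ended grows the task's sequential_io, a broken stream is folded into an EWMA, and once max(current, average) crosses dc->sequential_cutoff the request is sent around the cache. The stand-alone user-space sketch below (not part of the patch) models just that part; struct task_model, should_bypass() and the 4 MiB cutoff are invented for the illustration, and ewma_add(..., 8, 0) is approximated by a plain (7*avg + val)/8 update.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQUENTIAL_CUTOFF	(4 << 20)	/* bytes; assumed default, tunable in bcache */

struct task_model {
	uint64_t sequential_io;		/* bytes accumulated by the current stream */
	uint64_t sequential_io_avg;	/* EWMA of completed streams */
	uint64_t last_sector;		/* where the previous bio ended */
};

/* Rough stand-in for ewma_add(avg, val, 8, 0): avg <- (7*avg + val) / 8 */
static void add_sequential(struct task_model *t)
{
	t->sequential_io_avg = (t->sequential_io_avg * 7 + t->sequential_io) / 8;
	t->sequential_io = 0;
}

static bool should_bypass(struct task_model *t, uint64_t sector, uint32_t bytes)
{
	if (sector == t->last_sector) {		/* contiguous with the last bio */
		t->sequential_io += bytes;
	} else {
		add_sequential(t);		/* stream broke: fold into the EWMA */
		t->sequential_io = bytes;
	}
	t->last_sector = sector + (bytes >> 9);

	/* Same shape as the kernel code: compare max(current, avg) to the cutoff */
	uint64_t best = t->sequential_io > t->sequential_io_avg ?
			t->sequential_io : t->sequential_io_avg;
	return best >= SEQUENTIAL_CUTOFF;
}

int main(void)
{
	struct task_model t = { 0 };
	uint64_t sector = 0;

	/* 64 back-to-back 128K writes: the stream trips the cutoff partway in */
	for (int i = 0; i < 64; i++) {
		bool bypass = should_bypass(&t, sector, 128 << 10);
		printf("io %2d: sequential_io=%8llu bypass=%d\n",
		       i, (unsigned long long)t.sequential_io, bypass);
		sector += (128 << 10) >> 9;
	}
	return 0;
}

Built with a plain cc invocation, the trace shows bypass flipping to 1 once the stream has accumulated the assumed cutoff, which is the behaviour the kernel function exercises per task.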
@@ -854,8 +979,8 @@ static void request_read_done_bh(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
-	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
-	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
+	bch_mark_cache_accounting(s, !s->cache_miss, s->op.bypass);
+	trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.bypass);
 
 	if (s->error)
 		continue_at_nobarrier(cl, request_read_error, bcache_wq);
@@ -873,7 +998,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss;
 
-	if (s->cache_miss || s->op.skip) {
+	if (s->cache_miss || s->op.bypass) {
 		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		if (miss == bio)
 			s->op.lookup_done = true;
@@ -940,9 +1065,7 @@ static void request_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	check_should_skip(dc, s);
 	closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
 	continue_at(cl, request_read_done_bh, NULL);
 }
 
@@ -961,41 +1084,48 @@ static void request_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start, end;
-	start = KEY(dc->disk.id, bio->bi_sector, 0);
-	end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
 
-	check_should_skip(dc, s);
 	down_read_non_owner(&dc->writeback_lock);
-
 	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
-		s->op.skip = false;
+		/*
+		 * We overlap with some dirty data undergoing background
+		 * writeback, force this write to writeback
+		 */
+		s->op.bypass = false;
 		s->writeback = true;
 	}
 
+	/*
+	 * Discards aren't _required_ to do anything, so skipping if
+	 * check_overlapping returned true is ok
+	 *
+	 * But check_overlapping drops dirty keys for which io hasn't started,
+	 * so we still want to call it.
+	 */
 	if (bio->bi_rw & REQ_DISCARD)
-		goto skip;
+		s->op.bypass = true;
 
 	if (should_writeback(dc, s->orig_bio,
 			     cache_mode(dc, bio),
-			     s->op.skip)) {
-		s->op.skip = false;
+			     s->op.bypass)) {
+		s->op.bypass = false;
 		s->writeback = true;
 	}
 
-	if (s->op.skip)
-		goto skip;
-
-	trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
+	trace_bcache_write(s->orig_bio, s->writeback, s->op.bypass);
 
-	if (!s->writeback) {
-		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-						   dc->disk.bio_split);
+	if (s->op.bypass) {
+		s->op.cache_bio = s->orig_bio;
+		bio_get(s->op.cache_bio);
 
-		closure_bio_submit(bio, cl, s->d);
-	} else {
+		if (!(bio->bi_rw & REQ_DISCARD) ||
+		    blk_queue_discard(bdev_get_queue(dc->bdev)))
+			closure_bio_submit(bio, cl, s->d);
+	} else if (s->writeback) {
 		bch_writeback_add(dc);
 		s->op.cache_bio = bio;
 
@@ -1011,21 +1141,15 @@ static void request_write(struct cached_dev *dc, struct search *s)
 
 			closure_bio_submit(flush, cl, s->d);
 		}
+	} else {
+		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+						   dc->disk.bio_split);
+
+		closure_bio_submit(bio, cl, s->d);
 	}
-out:
+
 	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
 	continue_at(cl, cached_dev_write_complete, NULL);
-skip:
-	s->op.skip = true;
-	s->op.cache_bio = s->orig_bio;
-	bio_get(s->op.cache_bio);
-
-	if ((bio->bi_rw & REQ_DISCARD) &&
-	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-		goto out;
-
-	closure_bio_submit(bio, cl, s->d);
-	goto out;
 }
 
 static void request_nodata(struct cached_dev *dc, struct search *s)
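With the skip/goto plumbing gone, request_write() above reduces to one ordered set of flag updates followed by a three-way dispatch: bypass, writeback, or writethrough. The sketch below restates that ordering as a pure function so the precedence is easy to see. It is an illustration only; the enum and function names are invented, and in the driver the outcome is expressed through s->op.bypass, s->writeback and s->op.cache_bio rather than a return value.

#include <stdbool.h>
#include <stdio.h>

enum write_policy {
	WRITE_BYPASS,		/* invalidate the cached range, IO goes only to the backing device */
	WRITE_WRITEBACK,	/* insert dirty data into the cache, write back later */
	WRITE_WRITETHROUGH,	/* insert clean data and submit to the backing device too */
};

static enum write_policy classify_write(bool bypass, bool overlaps_writeback,
					bool is_discard, bool wants_writeback)
{
	bool writeback = false;

	/* Overlap with dirty data already being written back forces writeback */
	if (overlaps_writeback) {
		bypass = false;
		writeback = true;
	}

	/* Discards don't need to be cached; invalidate and pass them down */
	if (is_discard)
		bypass = true;

	/* should_writeback() may still promote the write into the cache */
	if (wants_writeback) {
		bypass = false;
		writeback = true;
	}

	/* Dispatch order matches the driver: bypass is checked first */
	if (bypass)
		return WRITE_BYPASS;
	return writeback ? WRITE_WRITEBACK : WRITE_WRITETHROUGH;
}

int main(void)
{
	printf("sequential write, no overlap: %d\n", classify_write(true, false, false, false));
	printf("write over dirty data:        %d\n", classify_write(true, true, false, false));
	printf("small random write:           %d\n", classify_write(false, false, false, true));
	return 0;
}

Note that a discard overlapping dirty data still ends up bypassed: the REQ_DISCARD check runs after the overlap check, and bypass is tested first at dispatch time, which matches the code above.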
@@ -1033,14 +1157,10 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
 
-	if (bio->bi_rw & REQ_DISCARD) {
-		request_write(dc, s);
-		return;
-	}
-
 	if (s->op.flush_journal)
 		bch_journal_meta(s->op.c, cl);
 
+	/* If it's a flush, we send the flush to the backing device too */
 	closure_bio_submit(bio, cl, s->d);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1048,134 +1168,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
 
 /* Cached devices - read & write stuff */
 
-unsigned bch_get_congested(struct cache_set *c)
-{
-	int i;
-	long rand;
-
-	if (!c->congested_read_threshold_us &&
-	    !c->congested_write_threshold_us)
-		return 0;
-
-	i = (local_clock_us() - c->congested_last_us) / 1024;
-	if (i < 0)
-		return 0;
-
-	i += atomic_read(&c->congested);
-	if (i >= 0)
-		return 0;
-
-	i += CONGESTED_MAX;
-
-	if (i > 0)
-		i = fract_exp_two(i, 6);
-
-	rand = get_random_int();
-	i -= bitmap_weight(&rand, BITS_PER_LONG);
-
-	return i > 0 ? i : 1;
-}
-
-static void add_sequential(struct task_struct *t)
-{
-	ewma_add(t->sequential_io_avg,
-		 t->sequential_io, 8, 0);
-
-	t->sequential_io = 0;
-}
-
-static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
-{
-	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
-}
-
-static void check_should_skip(struct cached_dev *dc, struct search *s)
-{
-	struct cache_set *c = s->op.c;
-	struct bio *bio = &s->bio.bio;
-	unsigned mode = cache_mode(dc, bio);
-	unsigned sectors, congested = bch_get_congested(c);
-
-	if (atomic_read(&dc->disk.detaching) ||
-	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
-	    (bio->bi_rw & REQ_DISCARD))
-		goto skip;
-
-	if (mode == CACHE_MODE_NONE ||
-	    (mode == CACHE_MODE_WRITEAROUND &&
-	     (bio->bi_rw & REQ_WRITE)))
-		goto skip;
-
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
-	    bio_sectors(bio) & (c->sb.block_size - 1)) {
-		pr_debug("skipping unaligned io");
-		goto skip;
-	}
-
-	if (!congested && !dc->sequential_cutoff)
-		goto rescale;
-
-	if (!congested &&
-	    mode == CACHE_MODE_WRITEBACK &&
-	    (bio->bi_rw & REQ_WRITE) &&
-	    (bio->bi_rw & REQ_SYNC))
-		goto rescale;
-
-	if (dc->sequential_merge) {
-		struct io *i;
-
-		spin_lock(&dc->io_lock);
-
-		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-			if (i->last == bio->bi_sector &&
-			    time_before(jiffies, i->jiffies))
-				goto found;
-
-		i = list_first_entry(&dc->io_lru, struct io, lru);
-
-		add_sequential(s->task);
-		i->sequential = 0;
-found:
-		if (i->sequential + bio->bi_size > i->sequential)
-			i->sequential += bio->bi_size;
-
-		i->last = bio_end_sector(bio);
-		i->jiffies = jiffies + msecs_to_jiffies(5000);
-		s->task->sequential_io = i->sequential;
-
-		hlist_del(&i->hash);
-		hlist_add_head(&i->hash, iohash(dc, i->last));
-		list_move_tail(&i->lru, &dc->io_lru);
-
-		spin_unlock(&dc->io_lock);
-	} else {
-		s->task->sequential_io = bio->bi_size;
-
-		add_sequential(s->task);
-	}
-
-	sectors = max(s->task->sequential_io,
-		      s->task->sequential_io_avg) >> 9;
-
-	if (dc->sequential_cutoff &&
-	    sectors >= dc->sequential_cutoff >> 9) {
-		trace_bcache_bypass_sequential(s->orig_bio);
-		goto skip;
-	}
-
-	if (congested && sectors >= congested) {
-		trace_bcache_bypass_congested(s->orig_bio);
-		goto skip;
-	}
-
-rescale:
-	bch_rescale_priorities(c, bio_sectors(bio));
-	return;
-skip:
-	bch_mark_sectors_bypassed(s, bio_sectors(bio));
-	s->op.skip = true;
-}
-
 static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct search *s;
@@ -1195,12 +1187,16 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		s = search_alloc(bio, d);
 		trace_bcache_request_start(s, bio);
 
-		if (!bio_has_data(bio))
+		if (!bio->bi_size)
 			request_nodata(dc, s);
-		else if (rw)
-			request_write(dc, s);
-		else
-			request_read(dc, s);
+		else {
+			s->op.bypass = check_should_bypass(dc, s);
+
+			if (rw)
+				request_write(dc, s);
+			else
+				request_read(dc, s);
+		}
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1298,21 +1294,21 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_bcache_request_start(s, bio);
 
-	if (bio_has_data(bio) && !rw) {
-		closure_call(&s->op.cl, btree_read_async, NULL, cl);
-	} else if (bio_has_data(bio) || s->op.skip) {
+	if (!bio->bi_size) {
+		if (s->op.flush_journal)
+			bch_journal_meta(s->op.c, cl);
+	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
+		s->op.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
 		s->writeback = true;
 		s->op.cache_bio = bio;
 
 		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
 	} else {
-		/* No data - probably a cache flush */
-		if (s->op.flush_journal)
-			bch_journal_meta(s->op.c, cl);
+		closure_call(&s->op.cl, btree_read_async, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
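Finally, a note on bch_get_congested(), which feeds the congested threshold used by check_should_bypass(): it turns accumulated latency overruns on the cache device into a shrinking sector threshold, so the busier the SSD, the shorter a stream has to be before it is sent around the cache. The model below is a rough user-space rendering of that curve; the value of CONGESTED_MAX and treating fract_exp_two(i, 6) as roughly 2^(i/64) are assumptions on my part, and the random bitmap_weight() jitter is left out.

#include <math.h>
#include <stdio.h>

#define CONGESTED_MAX 1024	/* assumed value of the kernel constant */

/*
 * c_congested mimics atomic_read(&c->congested): it goes more negative as
 * reads/writes keep exceeding the configured congested_*_threshold_us, and
 * decays back toward zero with time since the last congestion event.
 * Returns 0 for "not congested", otherwise a sector threshold: streams at
 * least this many sectors long get bypassed (see check_should_bypass()).
 */
static unsigned get_congested(int c_congested, int us_since_last_congested)
{
	int i = us_since_last_congested / 1024;	/* decay since the last event */

	if (i < 0)
		return 0;

	i += c_congested;
	if (i >= 0)
		return 0;			/* fully decayed: never bypass */

	i += CONGESTED_MAX;
	if (i > 0)
		i = (int)exp2((double)i / 64);	/* stand-in for fract_exp_two(i, 6) */

	return i > 0 ? i : 1;
}

int main(void)
{
	/* Show how the bypass threshold shrinks as congestion accumulates */
	for (int congested = -64; congested >= -1024; congested -= 64)
		printf("congested=%5d -> bypass threshold %u sectors\n",
		       congested, get_congested(congested, 0));
	return 0;
}

Lightly congested values map to a large threshold (only very long streams bypass), while a fully saturated counter drives the threshold down to a single sector, at which point essentially everything goes straight to the backing device.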