path: root/drivers/md/bcache/writeback.c
author     Kent Overstreet <koverstreet@google.com>    2013-06-05 09:24:39 -0400
committer  Kent Overstreet <koverstreet@google.com>    2013-06-27 00:58:04 -0400
commit     72c270612bd33192fa836ad0f2939af1ca218292 (patch)
tree       344129d75f3b5c0abcf77dd4b6340783a126cde8 /drivers/md/bcache/writeback.c
parent     279afbad4e54acbd61bf88a54a73af3bbfdeb5dd (diff)
bcache: Write out full stripes
Now that we're tracking dirty data per stripe, we can add two optimizations
for raid5/6:

 * If a stripe is already dirty, force writes to that stripe to writeback
   mode - to help build up full stripes of dirty data

 * When flushing dirty data, preferentially write out full stripes first if
   there are any.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
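The first optimization lives in the request path, not in writeback.c, so it does not appear in the diff below. As a rough sketch of the idea only - stripe_already_dirty() and want_writeback() are hypothetical names for illustration, not the helpers the patch actually adds - the decision amounts to consulting the per-stripe dirty counters introduced by the previous patch:

	/*
	 * Illustrative only: force writes that touch an already-dirty stripe
	 * into writeback mode, so dirty data accumulates until whole stripes
	 * can be flushed.  Helper names here are made up for this sketch.
	 */
	static bool stripe_already_dirty(struct cached_dev *dc,
					 uint64_t offset, unsigned nr_sectors)
	{
		unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
		uint64_t stripe = offset >> dc->disk.stripe_size_bits;

		while (1) {
			/* Any dirty sectors at all in this stripe? */
			if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
				return true;

			if (nr_sectors <= stripe_size)
				return false;

			nr_sectors -= stripe_size;
			stripe++;
		}
	}

	static bool want_writeback(struct cached_dev *dc,
				   uint64_t offset, unsigned nr_sectors)
	{
		/* Partial-stripe writes are expensive on raid5/6 backing devices */
		return dc->partial_stripes_expensive &&
			stripe_already_dirty(dc, offset, nr_sectors);
	}

The second optimization - preferring full stripes when refilling the writeback keybuf - is what the diff below implements via dirty_full_stripe_pred() and the full_stripes path in refill_dirty().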
Diffstat (limited to 'drivers/md/bcache/writeback.c')
-rw-r--r--    drivers/md/bcache/writeback.c    44
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index dd815475c524..d81ee5ccc726 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -108,6 +108,31 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 	return KEY_DIRTY(k);
 }
 
+static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
+{
+	uint64_t stripe;
+	unsigned nr_sectors = KEY_SIZE(k);
+	struct cached_dev *dc = container_of(buf, struct cached_dev,
+					     writeback_keys);
+	unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
+
+	if (!KEY_DIRTY(k))
+		return false;
+
+	stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
+	while (1) {
+		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
+		    stripe_size)
+			return false;
+
+		if (nr_sectors <= stripe_size)
+			return true;
+
+		nr_sectors -= stripe_size;
+		stripe++;
+	}
+}
+
 static void dirty_init(struct keybuf_key *w)
 {
 	struct dirty_io *io = w->private;
@@ -152,7 +177,22 @@ static void refill_dirty(struct closure *cl)
 		searched_from_start = true;
 	}
 
-	bch_refill_keybuf(dc->disk.c, buf, &end);
+	if (dc->partial_stripes_expensive) {
+		uint64_t i;
+
+		for (i = 0; i < dc->disk.nr_stripes; i++)
+			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
+			    1 << dc->disk.stripe_size_bits)
+				goto full_stripes;
+
+		goto normal_refill;
+full_stripes:
+		bch_refill_keybuf(dc->disk.c, buf, &end,
+				  dirty_full_stripe_pred);
+	} else {
+normal_refill:
+		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+	}
 
 	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
 		/* Searched the entire btree - delay awhile */
@@ -446,7 +486,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
 	closure_init_unlocked(&dc->writeback);
 	init_rwsem(&dc->writeback_lock);
 
-	bch_keybuf_init(&dc->writeback_keys, dirty_pred);
+	bch_keybuf_init(&dc->writeback_keys);
 
 	dc->writeback_metadata = true;
 	dc->writeback_running = true;
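For context on the last hunk: the filter predicate moves from keybuf initialization to each refill call, so refill_dirty() can choose a different predicate per pass. The matching changes to bch_keybuf_init()/bch_refill_keybuf() live in the btree/keybuf code and are not shown in this file's diff; the before/after call pattern visible here is:

	/* Before this patch: predicate bound once, at init time */
	bch_keybuf_init(&dc->writeback_keys, dirty_pred);
	bch_refill_keybuf(dc->disk.c, buf, &end);

	/* After: predicate chosen per refill, so the full-stripe pass can
	 * swap in dirty_full_stripe_pred() when whole stripes are dirty */
	bch_keybuf_init(&dc->writeback_keys);
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_full_stripe_pred);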