about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/md/bcache/writeback.h
diff options
context:
space:
mode:
authorKent Overstreet <koverstreet@google.com>2013-06-05 09:24:39 -0400
committerKent Overstreet <koverstreet@google.com>2013-06-27 00:58:04 -0400
commit72c270612bd33192fa836ad0f2939af1ca218292 (patch)
tree344129d75f3b5c0abcf77dd4b6340783a126cde8 /drivers/md/bcache/writeback.h
parent279afbad4e54acbd61bf88a54a73af3bbfdeb5dd (diff)
bcache: Write out full stripes
Now that we're tracking dirty data per stripe, we can add two optimizations for raid5/6:

* If a stripe is already dirty, force writes to that stripe to writeback mode — to help build up full stripes of dirty data.

* When flushing dirty data, preferentially write out full stripes first, if there are any.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Diffstat (limited to 'drivers/md/bcache/writeback.h')
-rw-r--r--  drivers/md/bcache/writeback.h | 43
1 file changed, 43 insertions, 0 deletions
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 5ce9771df047..c91f61bb95b6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -1,6 +1,9 @@
1#ifndef _BCACHE_WRITEBACK_H 1#ifndef _BCACHE_WRITEBACK_H
2#define _BCACHE_WRITEBACK_H 2#define _BCACHE_WRITEBACK_H
3 3
/*
 * Cache-utilization cutoffs (percent of cache in use, compared against
 * gc_stats.in_use by should_writeback()):
 *
 * CUTOFF_WRITEBACK      - above this, non-sync writes are no longer
 *                         cached in writeback mode.
 * CUTOFF_WRITEBACK_SYNC - hard limit; above this, nothing is written
 *                         back-cached, not even REQ_SYNC writes.
 */
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

4static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) 7static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
5{ 8{
6 uint64_t i, ret = 0; 9 uint64_t i, ret = 0;
@@ -11,6 +14,46 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
11 return ret; 14 return ret;
12} 15}
13 16
17static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
18 uint64_t offset,
19 unsigned nr_sectors)
20{
21 uint64_t stripe = offset >> d->stripe_size_bits;
22
23 while (1) {
24 if (atomic_read(d->stripe_sectors_dirty + stripe))
25 return true;
26
27 if (nr_sectors <= 1 << d->stripe_size_bits)
28 return false;
29
30 nr_sectors -= 1 << d->stripe_size_bits;
31 stripe++;
32 }
33}
34
35static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
36 unsigned cache_mode, bool would_skip)
37{
38 unsigned in_use = dc->disk.c->gc_stats.in_use;
39
40 if (cache_mode != CACHE_MODE_WRITEBACK ||
41 atomic_read(&dc->disk.detaching) ||
42 in_use > CUTOFF_WRITEBACK_SYNC)
43 return false;
44
45 if (dc->partial_stripes_expensive &&
46 bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
47 bio_sectors(bio)))
48 return true;
49
50 if (would_skip)
51 return false;
52
53 return bio->bi_rw & REQ_SYNC ||
54 in_use <= CUTOFF_WRITEBACK;
55}
56
/*
 * Adjust a backing device's per-stripe dirty-sector accounting.
 * NOTE(review): parameter names inferred from the bcache writeback
 * implementation (inode selects the bcache_device, nr_sectors may be
 * negative to mark sectors clean) — confirm against writeback.c.
 */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors);
/* Kick the writeback thread for @dc. */
void bch_writeback_queue(struct cached_dev *dc);
/* Note newly dirtied data on @dc and schedule writeback as needed. */
void bch_writeback_add(struct cached_dev *dc);