author	Joe Thornber <ejt@redhat.com>	2015-02-20 09:22:17 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2015-03-31 12:03:48 -0400
commit	e65ff8703f56273c6dc8b77373f4d2bef6e35107 (patch)
tree	64f115b6ea2912e0605be69e45839d16fcf2c924
parent	fdecee3224d90e51c611198baeb0c38e568ca0e8 (diff)
dm cache policy mq: try not to writeback data that changed in the last second
Writeback takes out a lock on the cache block, so it will increase the
latency of any concurrent io.  This patch works by placing 2 sentinel
objects on each level of the multiqueues.  Every WRITEBACK_PERIOD the
oldest sentinel gets moved to the newest end of the queue level.

When looking for writeback work:

  if less than 25% of the cache is clean:
    we select the oldest object with the lowest hit count

  otherwise:
    we select the oldest object that is not past a writeback sentinel.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
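To make the sentinel trick concrete, here is a small stand-alone C sketch of the idea.  It is not the kernel code: it models a single queue level with a single sentinel (the patch keeps two alternating sets of sentinels per level so the previous period's markers stay valid while the new ones are planted), and every name in it is invented for illustration.

/*
 * Illustration only: one queue level kept in age order, with a sentinel
 * marking the start of the current writeback period.  Entries in front
 * of the sentinel were last dirtied before the period began and are
 * safe to write back; entries behind it changed recently and are skipped.
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	bool sentinel;	/* marks the start of the current writeback period */
	int block;	/* demo payload: cache block number */
};

struct level {
	struct node *head;	/* oldest entry */
	struct node **tailp;	/* last next pointer, for O(1) append */
};

static void level_init(struct level *l)
{
	l->head = NULL;
	l->tailp = &l->head;
}

static void push_tail(struct level *l, struct node *n)
{
	n->next = NULL;
	*l->tailp = n;
	l->tailp = &n->next;
}

/* Every writeback period: move the sentinel to the newest end, so that
 * everything in front of it is known to predate the current period. */
static void rotate_sentinel(struct level *l, struct node *s)
{
	struct node **pp;

	for (pp = &l->head; *pp; pp = &(*pp)->next) {
		if (*pp == s) {
			*pp = s->next;
			if (l->tailp == &s->next)
				l->tailp = pp;
			break;
		}
	}
	push_tail(l, s);
}

/* Pop the oldest entry that is not past the sentinel, i.e. a block that
 * has not been written to during the current period. */
static struct node *pop_old_sketch(struct level *l)
{
	struct node *n = l->head;

	if (!n || n->sentinel)
		return NULL;

	l->head = n->next;
	if (!l->head)
		l->tailp = &l->head;
	return n;
}

int main(void)
{
	struct level l;
	struct node s = { .sentinel = true };
	struct node a = { .block = 1 }, b = { .block = 2 }, c = { .block = 3 };
	struct node *n;

	level_init(&l);
	push_tail(&l, &s);	/* initial sentinel position */
	push_tail(&l, &a);	/* block 1 dirtied */
	push_tail(&l, &b);	/* block 2 dirtied */

	rotate_sentinel(&l, &s);	/* a writeback period elapses */

	push_tail(&l, &c);	/* block 3 dirtied just now */

	while ((n = pop_old_sketch(&l)))
		printf("write back block %d\n", n->block);
	/* prints blocks 1 and 2; block 3 is past the sentinel and skipped */

	return 0;
}

Running the sketch writes back blocks 1 and 2 but leaves block 3 alone, because block 3 was dirtied after the sentinel was last rotated and so falls inside the current period.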
-rw-r--r--	drivers/md/dm-cache-policy-mq.c	94
1 file changed, 93 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 6bfb39411fa9..3ddd1162334d 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -8,6 +8,7 @@
 #include "dm.h"
 
 #include <linux/hash.h>
+#include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -126,8 +127,12 @@ static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
 #define NR_QUEUE_LEVELS 16u
 #define NR_SENTINELS NR_QUEUE_LEVELS * 3
 
+#define WRITEBACK_PERIOD HZ
+
 struct queue {
 	unsigned nr_elts;
+	bool current_writeback_sentinels;
+	unsigned long next_writeback;
 	struct list_head qs[NR_QUEUE_LEVELS];
 	struct list_head sentinels[NR_SENTINELS];
 };
@@ -137,12 +142,21 @@ static void queue_init(struct queue *q)
 	unsigned i;
 
 	q->nr_elts = 0;
+	q->current_writeback_sentinels = false;
+	q->next_writeback = 0;
 	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
 		INIT_LIST_HEAD(q->qs + i);
 		INIT_LIST_HEAD(q->sentinels + i);
+		INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
+		INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
 	}
 }
 
+static unsigned queue_size(struct queue *q)
+{
+	return q->nr_elts;
+}
+
 static bool queue_empty(struct queue *q)
 {
 	return q->nr_elts == 0;
@@ -197,6 +211,27 @@ static struct list_head *queue_pop(struct queue *q)
 	return r;
 }
 
+/*
+ * Pops an entry from a level that is not past a sentinel.
+ */
+static struct list_head *queue_pop_old(struct queue *q)
+{
+	unsigned level;
+	struct list_head *h;
+
+	for (level = 0; level < NR_QUEUE_LEVELS; level++)
+		list_for_each(h, q->qs + level) {
+			if (is_sentinel(q, h))
+				break;
+
+			q->nr_elts--;
+			list_del(h);
+			return h;
+		}
+
+	return NULL;
+}
+
 static struct list_head *list_pop(struct list_head *lh)
 {
 	struct list_head *r = lh->next;
@@ -207,6 +242,31 @@ static struct list_head *list_pop(struct list_head *lh)
 	return r;
 }
 
+static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
+{
+	if (q->current_writeback_sentinels)
+		return q->sentinels + NR_QUEUE_LEVELS + level;
+	else
+		return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
+}
+
+static void queue_update_writeback_sentinels(struct queue *q)
+{
+	unsigned i;
+	struct list_head *h;
+
+	if (time_after(jiffies, q->next_writeback)) {
+		for (i = 0; i < NR_QUEUE_LEVELS; i++) {
+			h = writeback_sentinel(q, i);
+			list_del(h);
+			list_add_tail(h, q->qs + i);
+		}
+
+		q->next_writeback = jiffies + WRITEBACK_PERIOD;
+		q->current_writeback_sentinels = !q->current_writeback_sentinels;
+	}
+}
+
 /*
  * Sometimes we want to iterate through entries that have been pushed since
  * a certain event. We use sentinel entries on the queues to delimit these
@@ -540,6 +600,20 @@ static struct entry *pop(struct mq_policy *mq, struct queue *q)
 	return e;
 }
 
+static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
+{
+	struct entry *e;
+	struct list_head *h = queue_pop_old(q);
+
+	if (!h)
+		return NULL;
+
+	e = container_of(h, struct entry, list);
+	hash_remove(e);
+
+	return e;
+}
+
 static struct entry *peek(struct queue *q)
 {
 	struct list_head *h = queue_peek(q);
@@ -932,6 +1006,7 @@ static void copy_tick(struct mq_policy *mq)
 	queue_tick(&mq->pre_cache);
 	queue_tick(&mq->cache_dirty);
 	queue_tick(&mq->cache_clean);
+	queue_update_writeback_sentinels(&mq->cache_dirty);
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
 }
 
@@ -1112,10 +1187,27 @@ static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
 	return r;
 }
 
+#define CLEAN_TARGET_PERCENTAGE 25
+
+static bool clean_target_met(struct mq_policy *mq)
+{
+	/*
+	 * Cache entries may not be populated. So we cannot rely on the
+	 * size of the clean queue.
+	 */
+	unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
+	unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;
+
+	return nr_clean >= target;
+}
+
 static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
 			       dm_cblock_t *cblock)
 {
-	struct entry *e = pop(mq, &mq->cache_dirty);
+	struct entry *e = pop_old(mq, &mq->cache_dirty);
+
+	if (!e && !clean_target_met(mq))
+		e = pop(mq, &mq->cache_dirty);
 
 	if (!e)
 		return -ENODATA;
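
The clean-target arithmetic used by the fallback above can be exercised outside the kernel.  The sketch below reuses only the 25% figure from the patch; the helper name and the cache sizes in main() are invented for the example.

/*
 * Stand-alone illustration of the clean-target check: derive the clean
 * count from the (always fully populated) dirty queue and compare it
 * against 25% of the cache size.
 */
#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET_PERCENTAGE 25

static bool clean_target_met(unsigned cache_size, unsigned nr_dirty)
{
	unsigned nr_clean = cache_size - nr_dirty;
	unsigned target = cache_size * CLEAN_TARGET_PERCENTAGE / 100;

	return nr_clean >= target;
}

int main(void)
{
	/* 1000-block cache, 800 dirty: 200 clean < 250 target. */
	printf("800 dirty: target met? %d\n", clean_target_met(1000, 800));

	/* 600 dirty: 400 clean >= 250 target. */
	printf("600 dirty: target met? %d\n", clean_target_met(1000, 600));
	return 0;
}

With 800 of 1000 blocks dirty the target is missed, so __mq_writeback_work falls back from pop_old() to pop() and may write back a block that changed within the last second; with 600 dirty blocks it sticks to blocks older than the current writeback period.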