Diffstat (limited to 'drivers/md/bcache/alloc.c')
-rw-r--r-- drivers/md/bcache/alloc.c | 140
1 file changed, 58 insertions(+), 82 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a59ef6147fc7..443d03fbac47 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -78,12 +78,6 @@ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
 	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
 	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
 
-	if (CACHE_SYNC(&ca->set->sb)) {
-		ca->need_save_prio = max(ca->need_save_prio,
-					 bucket_disk_gen(b));
-		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
-	}
-
 	return ret;
 }
 
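With the on-disk generation tracking gone from bch_inc_gen(), the only limit that matters when bumping a bucket's gen is how far it has drifted from the gen recorded at the last GC pass. As a reminder of how that distance is computed, here is a minimal userspace model of the wrapping 8-bit generation arithmetic; it follows the spirit of bcache's bucket_gc_gen() macro, but the struct layout is a simplified stand-in:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for a bcache bucket's generation counters. */
    struct bucket {
        uint8_t gen;     /* bumped each time the bucket is invalidated */
        uint8_t last_gc; /* value of gen at the last garbage collection */
    };

    /* Wrapping 8-bit distance, in the spirit of bcache's bucket_gc_gen(). */
    static uint8_t bucket_gc_gen(const struct bucket *b)
    {
        return (uint8_t)(b->gen - b->last_gc);
    }

    int main(void)
    {
        struct bucket b = { .gen = 3, .last_gc = 250 };

        /* 3 - 250 wraps modulo 256 to 9: nine reuses since the last GC. */
        printf("gc gen: %u\n", (unsigned)bucket_gc_gen(&b));
        return 0;
    }

Because the distance wraps, GC must run before any bucket accumulates BUCKET_GC_GEN_MAX reuses, which is exactly what the need_gc bookkeeping above enforces.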
@@ -120,58 +114,46 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 	mutex_unlock(&c->bucket_lock);
 }
 
-/* Allocation */
+/*
+ * Background allocation thread: scans for buckets to be invalidated,
+ * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
+ * then optionally issues discard commands to the newly free buckets, then puts
+ * them on the various freelists.
+ */
 
 static inline bool can_inc_bucket_gen(struct bucket *b)
 {
-	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
-		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
+	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
 }
 
-bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
+bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
 {
-	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
-
-	if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
-		unsigned i;
-
-		for (i = 0; i < RESERVE_NONE; i++)
-			if (!fifo_full(&ca->free[i]))
-				goto add;
-
-		return false;
-	}
-add:
-	b->prio = 0;
-
-	if (can_inc_bucket_gen(b) &&
-	    fifo_push(&ca->unused, b - ca->buckets)) {
-		atomic_inc(&b->pin);
-		return true;
-	}
-
-	return false;
-}
-
-static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
-{
+	BUG_ON(!ca->set->gc_mark_valid);
+
 	return (!GC_MARK(b) ||
 		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
 		!atomic_read(&b->pin) &&
 		can_inc_bucket_gen(b);
 }
 
-static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
+void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
 {
-	size_t bucket = b - ca->buckets;
+	lockdep_assert_held(&ca->set->bucket_lock);
+	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);
 
 	if (GC_SECTORS_USED(b))
-		trace_bcache_invalidate(ca, bucket);
+		trace_bcache_invalidate(ca, b - ca->buckets);
 
 	bch_inc_gen(ca, b);
 	b->prio = INITIAL_PRIO;
 	atomic_inc(&b->pin);
-	fifo_push(&ca->free_inc, bucket);
+}
+
+static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+	__bch_invalidate_one_bucket(ca, b);
+
+	fifo_push(&ca->free_inc, b - ca->buckets);
 }
 
 /*
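The invalidate path is now split in two: __bch_invalidate_one_bucket() performs only the state change (bump the gen, reset the prio, pin the bucket), while bch_invalidate_one_bucket() additionally queues the bucket index on free_inc for the allocator thread. A rough userspace model of that split, with a toy ring-buffer FIFO standing in for the kernel's fifo type (every name here is a simplified stand-in, not the kernel API):

    #include <assert.h>
    #include <stddef.h>

    enum { NBUCKETS = 8, FREE_INC_SIZE = 4 };

    struct bucket { unsigned gen, prio; int pin; };

    static struct bucket buckets[NBUCKETS];
    static size_t free_inc[FREE_INC_SIZE];     /* FIFO of bucket indices */
    static size_t fi_head, fi_tail;

    static int fifo_push(size_t idx)
    {
        if (fi_head - fi_tail == FREE_INC_SIZE)
            return 0;                          /* full */
        free_inc[fi_head++ % FREE_INC_SIZE] = idx;
        return 1;
    }

    /* State change only: bump the gen, reset the prio, pin against reuse.
     * Callers that place the bucket themselves can stop here. */
    static void __invalidate_one_bucket(struct bucket *b)
    {
        b->gen++;
        b->prio = 0;                           /* INITIAL_PRIO in the kernel */
        b->pin++;
    }

    /* Common case: invalidate and queue the bucket for the allocator. */
    static void invalidate_one_bucket(struct bucket *b)
    {
        __invalidate_one_bucket(b);
        /* the real allocator only calls this when free_inc has room */
        (void)fifo_push((size_t)(b - buckets));
    }

    int main(void)
    {
        invalidate_one_bucket(&buckets[3]);
        assert(fi_head - fi_tail == 1 && free_inc[fi_tail] == 3);
        return 0;
    }

The two-level split lets code that manages bucket placement itself reuse the state change without implicitly touching free_inc.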
@@ -201,20 +183,7 @@ static void invalidate_buckets_lru(struct cache *ca)
 	ca->heap.used = 0;
 
 	for_each_bucket(b, ca) {
-		/*
-		 * If we fill up the unused list, if we then return before
-		 * adding anything to the free_inc list we'll skip writing
-		 * prios/gens and just go back to allocating from the unused
-		 * list:
-		 */
-		if (fifo_full(&ca->unused))
-			return;
-
-		if (!can_invalidate_bucket(ca, b))
-			continue;
-
-		if (!GC_SECTORS_USED(b) &&
-		    bch_bucket_add_unused(ca, b))
+		if (!bch_can_invalidate_bucket(ca, b))
 			continue;
 
 		if (!heap_full(&ca->heap))
@@ -239,7 +208,7 @@ static void invalidate_buckets_lru(struct cache *ca)
 			return;
 		}
 
-		invalidate_one_bucket(ca, b);
+		bch_invalidate_one_bucket(ca, b);
 	}
 }
 
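After the change, invalidate_buckets_lru() scans every bucket once, skips the ones bch_can_invalidate_bucket() rejects, and collects the best eviction candidates in the bounded heap ca->heap before popping them into free_inc. The sketch below models that bounded top-k selection; bcache uses a real binary heap, but a linear scan over a small candidate array shows the same idea, and "lower prio = better candidate" is this toy model's assumption:

    #include <stddef.h>
    #include <stdio.h>

    #define K 3

    struct bucket { unsigned prio; };

    int main(void)
    {
        struct bucket all[] = { {9}, {2}, {7}, {1}, {8}, {3} };
        const struct bucket *best[K] = { 0 };
        size_t i, j;

        for (i = 0; i < sizeof(all) / sizeof(all[0]); i++) {
            /* find the weakest kept candidate (empty slot or highest prio) */
            size_t worst = 0;

            for (j = 1; j < K; j++)
                if (!best[j] ||
                    (best[worst] && best[j]->prio > best[worst]->prio))
                    worst = j;

            if (!best[worst] || all[i].prio < best[worst]->prio)
                best[worst] = &all[i];
        }

        for (j = 0; j < K; j++)
            printf("candidate prio %u\n", best[j]->prio);
        return 0;
    }

One full pass bounded to K kept candidates is what keeps the scan cheap regardless of nbuckets.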
@@ -255,8 +224,8 @@ static void invalidate_buckets_fifo(struct cache *ca)
 
 		b = ca->buckets + ca->fifo_last_bucket++;
 
-		if (can_invalidate_bucket(ca, b))
-			invalidate_one_bucket(ca, b);
+		if (bch_can_invalidate_bucket(ca, b))
+			bch_invalidate_one_bucket(ca, b);
 
 		if (++checked >= ca->sb.nbuckets) {
 			ca->invalidate_needs_gc = 1;
@@ -280,8 +249,8 @@ static void invalidate_buckets_random(struct cache *ca)
 
 		b = ca->buckets + n;
 
-		if (can_invalidate_bucket(ca, b))
-			invalidate_one_bucket(ca, b);
+		if (bch_can_invalidate_bucket(ca, b))
+			bch_invalidate_one_bucket(ca, b);
 
 		if (++checked >= ca->sb.nbuckets / 2) {
 			ca->invalidate_needs_gc = 1;
@@ -293,8 +262,7 @@ static void invalidate_buckets_random(struct cache *ca)
 
 static void invalidate_buckets(struct cache *ca)
 {
-	if (ca->invalidate_needs_gc)
-		return;
+	BUG_ON(ca->invalidate_needs_gc);
 
 	switch (CACHE_REPLACEMENT(&ca->sb)) {
 	case CACHE_REPLACEMENT_LRU:
@@ -354,17 +322,10 @@ static int bch_allocator_thread(void *arg)
 		 * possibly issue discards to them, then we add the bucket to
 		 * the free list:
 		 */
-		while (1) {
+		while (!fifo_empty(&ca->free_inc)) {
 			long bucket;
 
-			if ((!atomic_read(&ca->set->prio_blocked) ||
-			     !CACHE_SYNC(&ca->set->sb)) &&
-			    !fifo_empty(&ca->unused))
-				fifo_pop(&ca->unused, bucket);
-			else if (!fifo_empty(&ca->free_inc))
-				fifo_pop(&ca->free_inc, bucket);
-			else
-				break;
+			fifo_pop(&ca->free_inc, bucket);
 
 			if (ca->discard) {
 				mutex_unlock(&ca->set->bucket_lock);
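The first phase of the allocator loop is now a plain drain of free_inc, but the discard branch keeps an important pattern: bucket_lock is dropped around the blocking discard I/O (blkdev_issue_discard() in the kernel) and retaken afterwards. A small pthreads sketch of that lock-around-blocking-work pattern, with userspace stand-ins throughout rather than the kernel's types:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

    static void issue_discard(long bucket)
    {
        (void)bucket;
        usleep(1000);        /* stands in for the blocking discard I/O */
    }

    /* called with bucket_lock held */
    static void free_one_bucket(long bucket, int discard)
    {
        if (discard) {
            pthread_mutex_unlock(&bucket_lock);
            issue_discard(bucket);   /* may sleep; lock is not held */
            pthread_mutex_lock(&bucket_lock);
            /* anything read before unlocking must be revalidated here */
        }
        /* ...push the bucket onto a freelist... */
    }

    int main(void)
    {
        pthread_mutex_lock(&bucket_lock);
        free_one_bucket(42, 1);
        pthread_mutex_unlock(&bucket_lock);
        return 0;
    }

The bucket being freed was pinned when it was invalidated, so dropping the lock around the slow I/O is safe.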
@@ -385,9 +346,9 @@ static int bch_allocator_thread(void *arg)
 		 * them to the free_inc list:
 		 */
 
+retry_invalidate:
 		allocator_wait(ca, ca->set->gc_mark_valid &&
-			       (ca->need_save_prio > 64 ||
-				!ca->invalidate_needs_gc));
+			       !ca->invalidate_needs_gc);
 		invalidate_buckets(ca);
 
 		/*
@@ -395,13 +356,28 @@ static int bch_allocator_thread(void *arg)
 		 * new stuff to them:
 		 */
 		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
-		if (CACHE_SYNC(&ca->set->sb) &&
-		    (!fifo_empty(&ca->free_inc) ||
-		     ca->need_save_prio > 64))
+		if (CACHE_SYNC(&ca->set->sb)) {
+			/*
+			 * This could deadlock if an allocation with a btree
+			 * node locked ever blocked - having the btree node
+			 * locked would block garbage collection, but here we're
+			 * waiting on garbage collection before we invalidate
+			 * and free anything.
+			 *
+			 * But this should be safe since the btree code always
+			 * uses btree_check_reserve() before allocating now, and
+			 * if it fails it blocks without btree nodes locked.
+			 */
+			if (!fifo_full(&ca->free_inc))
+				goto retry_invalidate;
+
 			bch_prio_write(ca);
+		}
 	}
 }
 
+/* Allocation */
+
 long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 {
 	DEFINE_WAIT(w);
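The retry_invalidate loop replaces the old need_save_prio heuristic with a simpler batching rule: in sync mode, keep invalidating until free_inc is full, and only then pay for a prio write. A runnable control-flow sketch of that rule, with plain counters standing in for the real fifo and prio machinery:

    #include <stdbool.h>
    #include <stdio.h>

    enum { FREE_INC_SIZE = 8 };

    static int free_inc;                 /* buckets queued for reuse */

    static bool free_inc_full(void)
    {
        return free_inc >= FREE_INC_SIZE;
    }

    static void invalidate_buckets(void)
    {
        free_inc += 3;                   /* pretend each pass finds 3 */
        if (free_inc > FREE_INC_SIZE)
            free_inc = FREE_INC_SIZE;
    }

    static void prio_write(void)
    {
        printf("prio write covering %d buckets\n", free_inc);
    }

    int main(void)
    {
    retry_invalidate:
        invalidate_buckets();

        /* batch: don't pay for a prio write until free_inc is full */
        if (!free_inc_full())
            goto retry_invalidate;

        prio_write();
        return 0;
    }

Each prio write is expensive, so amortizing it over a full free_inc's worth of newly invalidated buckets is the point of the goto.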
@@ -447,8 +423,6 @@ out:
 			BUG_ON(i == r);
 		fifo_for_each(i, &ca->free_inc, iter)
 			BUG_ON(i == r);
-		fifo_for_each(i, &ca->unused, iter)
-			BUG_ON(i == r);
 	}
 
 	b = ca->buckets + r;
@@ -470,17 +444,19 @@ out:
 	return r;
 }
 
+void __bch_bucket_free(struct cache *ca, struct bucket *b)
+{
+	SET_GC_MARK(b, 0);
+	SET_GC_SECTORS_USED(b, 0);
+}
+
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
 {
 	unsigned i;
 
-	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct bucket *b = PTR_BUCKET(c, k, i);
-
-		SET_GC_MARK(b, 0);
-		SET_GC_SECTORS_USED(b, 0);
-		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
-	}
+	for (i = 0; i < KEY_PTRS(k); i++)
+		__bch_bucket_free(PTR_CACHE(c, k, i),
+				  PTR_BUCKET(c, k, i));
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
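bch_bucket_free() now just walks a key's pointers and hands each one to the factored-out __bch_bucket_free(), which clears the GC mark and sector count without touching any freelist. A toy model of that per-pointer loop, with simplified structures and a plain pointer array standing in for PTR_BUCKET()/PTR_CACHE():

    #include <stdio.h>

    enum { MAX_PTRS = 2 };

    struct bucket { int gc_mark, sectors_used; };

    struct bkey {
        unsigned nptrs;                  /* KEY_PTRS() stand-in */
        struct bucket *ptr[MAX_PTRS];    /* PTR_BUCKET() stand-in */
    };

    static void __bucket_free(struct bucket *b)
    {
        b->gc_mark = 0;          /* SET_GC_MARK(b, 0) */
        b->sectors_used = 0;     /* SET_GC_SECTORS_USED(b, 0) */
    }

    static void bucket_free(struct bkey *k)
    {
        unsigned i;

        for (i = 0; i < k->nptrs; i++)
            __bucket_free(k->ptr[i]);
    }

    int main(void)
    {
        struct bucket b0 = { 1, 42 }, b1 = { 1, 7 };
        struct bkey k = { 2, { &b0, &b1 } };

        bucket_free(&k);
        printf("b0 %d/%d, b1 %d/%d\n",
               b0.gc_mark, b0.sectors_used, b1.gc_mark, b1.sectors_used);
        return 0;
    }

With the unused list gone, freeing a bucket is purely a marking operation; the allocator rediscovers reclaimable buckets on its next invalidate pass.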