| author | Lars Ellenberg <lars.ellenberg@linbit.com> | 2013-03-23 00:17:36 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2013-03-23 00:17:36 -0400 |
| commit | cbe5e6109538ddab57764a88d9f0c2accd0c7d48 (patch) | |
| tree | e09809862b9be7a44b44aa7367d470da14b2ccf0 | |
| parent | 779b3fe4c0e9dea19ae3ddef0b5fd1a663b63ee6 (diff) | |
lru_cache: introduce lc_get_cumulative()
New helper to allow consolidating more updates
into a single transaction.
Without this, we can only grab a single refcount
on an updated element while preparing a transaction.
lc_get_cumulative - like lc_get; also finds to-be-changed elements
@lc: the lru cache to operate on
@enr: the label to look up
Unlike lc_get(), this also returns the element for @enr if it belongs to
a pending transaction, so the return values are the same as for lc_get(),
plus:
a pointer to an element already on the "to_be_changed" list.
In that case, the cache was already marked %LC_DIRTY.
The caller needs to make sure the pending transaction has completed
before actually using this element.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Fixed up by Jens to export lc_get_cumulative().
Signed-off-by: Jens Axboe <axboe@kernel.dk>
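
As an illustration of the semantics described above (an editor's sketch, not part of the patch or of DRBD): a caller of lc_get_cumulative() may get back an element whose change is still part of a pending transaction, and must let that transaction complete before using the element. The helper name wait_for_pending_transaction() below is hypothetical, and the serialization the lru_cache API expects around all lc_* calls (typically a spinlock) is left out.

```c
/* Editor's sketch, not from this patch: wait_for_pending_transaction()
 * is a hypothetical caller-side helper, and the locking that must
 * serialize all lc_* calls is omitted for brevity. */
static struct lc_element *get_label_ref(struct lru_cache *cache, unsigned int enr)
{
        struct lc_element *e;

        /* Unlike lc_get(), this also returns an element that is still on
         * the "to_be_changed" list of a pending transaction, taking an
         * additional reference instead of returning NULL. */
        e = lc_get_cumulative(cache, enr);
        if (!e)
                return NULL;    /* no element, and no change possible right now */

        if (e->lc_number != enr)
                /* The change for this label has not been committed yet:
                 * the pending transaction must complete before the
                 * element is actually used. */
                wait_for_pending_transaction(cache);

        return e;
}
```

The reference taken here is dropped with lc_put() once the caller is done with the element.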
| -rw-r--r-- | include/linux/lru_cache.h | 1 |
|---|---|---|
| -rw-r--r-- | lib/lru_cache.c | 56 |

2 files changed, 47 insertions, 10 deletions
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 4019013c6593..46262284de47 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -256,6 +256,7 @@ extern void lc_destroy(struct lru_cache *lc);
 extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
 extern void lc_del(struct lru_cache *lc, struct lc_element *element);

+extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 8335d39d2ccd..4a83ecd03650 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -365,7 +365,13 @@ static int lc_unused_element_available(struct lru_cache *lc)
         return 0;
 }

-static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change)
+/* used as internal flags to __lc_get */
+enum {
+        LC_GET_MAY_CHANGE = 1,
+        LC_GET_MAY_USE_UNCOMMITTED = 2,
+};
+
+static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
 {
         struct lc_element *e;

@@ -380,22 +386,31 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
          * this enr is currently being pulled in already,
          * and will be available once the pending transaction
          * has been committed. */
-        if (e && e->lc_new_number == e->lc_number) {
+        if (e) {
+                if (e->lc_new_number != e->lc_number) {
+                        /* It has been found above, but on the "to_be_changed"
+                         * list, not yet committed. Don't pull it in twice,
+                         * wait for the transaction, then try again...
+                         */
+                        if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
+                                RETURN(NULL);
+                        /* ... unless the caller is aware of the implications,
+                         * probably preparing a cumulative transaction. */
+                        ++e->refcnt;
+                        ++lc->hits;
+                        RETURN(e);
+                }
+                /* else: lc_new_number == lc_number; a real hit. */
                 ++lc->hits;
                 if (e->refcnt++ == 0)
                         lc->used++;
                 list_move(&e->list, &lc->in_use); /* Not evictable... */
                 RETURN(e);
         }
+        /* e == NULL */

         ++lc->misses;
-        if (!may_change)
-                RETURN(NULL);
-
-        /* It has been found above, but on the "to_be_changed" list, not yet
-         * committed. Don't pull it in twice, wait for the transaction, then
-         * try again */
-        if (e)
+        if (!(flags & LC_GET_MAY_CHANGE))
                 RETURN(NULL);

         /* To avoid races with lc_try_lock(), first, mark us dirty
@@ -477,7 +492,27 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
  */
 struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
 {
-        return __lc_get(lc, enr, 1);
+        return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
+}
+
+/**
+ * lc_get_cumulative - like lc_get; also finds to-be-changed elements
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Unlike lc_get this also returns the element for @enr, if it is belonging to
+ * a pending transaction, so the return values are like for lc_get(),
+ * plus:
+ *
+ * pointer to an element already on the "to_be_changed" list.
+ * In this case, the cache was already marked %LC_DIRTY.
+ *
+ * Caller needs to make sure that the pending transaction is completed,
+ * before proceeding to actually use this element.
+ */
+struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
+{
+        return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
 }

 /**
@@ -648,3 +683,4 @@ EXPORT_SYMBOL(lc_seq_printf_stats);
 EXPORT_SYMBOL(lc_seq_dump_details);
 EXPORT_SYMBOL(lc_try_lock);
 EXPORT_SYMBOL(lc_is_used);
+EXPORT_SYMBOL(lc_get_cumulative);
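
To show why the new helper matters for consolidating updates (again an editor's sketch under assumptions, not code from this commit): with plain lc_get(), looking up a label whose element already sits on the "to_be_changed" list returns NULL until the transaction commits, so each change tends to need its own transaction. With lc_get_cumulative() a caller can keep taking references and then commit everything at once. In the sketch, write_transaction() is a stand-in for the caller's backing-store update, the locking around the lc_* calls is omitted, and a single context is assumed to prepare and commit the transaction.

```c
/* Editor's sketch under assumptions (see above); not part of this commit. */
static void update_labels(struct lru_cache *lc, const unsigned int *enrs, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                struct lc_element *e = lc_get_cumulative(lc, enrs[i]);
                if (!e)
                        continue;       /* real code would wait for a free slot and retry */
                /* keep the reference; drop it later with lc_put(lc, e) */
        }

        if (lc->pending_changes) {
                write_transaction(lc);  /* hypothetical: persist the to_be_changed set */
                lc_committed(lc);       /* move changed elements to their new labels */
                lc_unlock(lc);          /* clear the LC_DIRTY/LC_LOCKED flags again */
        }
}
```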