| field | value | date |
|---|---|---|
| author | Dave Chinner <dchinner@redhat.com> | 2013-08-27 20:17:58 -0400 |
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2013-09-10 18:56:30 -0400 |
| commit | a38e40824844a5ec85f3ea95632be953477d2afa | |
| tree | 5f5df05ea253689cd515ef0ce47c6baf2210f094 /include/linux | |
| parent | 0a234c6dcb79a270803f5c9773ed650b78730962 | |
list: add a new LRU list type
Several subsystems use the same construct for LRU lists - a list head, a
spin lock and an item count. They also use exactly the same code for
adding and removing items from the LRU. Create a generic type for these
LRU lists.
This is the beginning of generic, node aware LRUs for shrinkers to work
with.
[glommer@openvz.org: enum defined constants for lru. Suggested by gthelen, don't relock over retry]
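As an illustrative sketch of the duplication being removed (the `demo_cache` names below are hypothetical, not taken from any particular subsystem), the repeated open-coded construct and its generic replacement look roughly like this:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/list_lru.h>

/* The open-coded pattern repeated across subsystems today. */
struct demo_cache_old {
        spinlock_t              lru_lock;       /* protects lru and nr_items */
        struct list_head        lru;            /* least-recently-used objects */
        long                    nr_items;       /* number of objects on the LRU */
};

/* The same cache expressed with the new generic type. */
struct demo_cache {
        struct list_lru         lru;            /* lock, list and count in one type */
};
```
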
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'include/linux')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | include/linux/list_lru.h | 115 |

1 file changed, 115 insertions, 0 deletions
```diff
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
new file mode 100644
index 000000000000..1a548b0b7578
--- /dev/null
+++ b/include/linux/list_lru.h
@@ -0,0 +1,115 @@
```
```c
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>

/* A list_lru_walk_cb must always return one of these values. */
enum lru_status {
        LRU_REMOVED,            /* item removed from list */
        LRU_ROTATE,             /* item referenced, give another pass */
        LRU_SKIP,               /* item cannot be locked, skip */
        LRU_RETRY,              /* item not freeable. May drop the lock
                                   internally, but has to return locked. */
};

struct list_lru {
        spinlock_t              lock;
        struct list_head        list;
        /* kept as signed so we can catch imbalance bugs */
        long                    nr_items;
};

int list_lru_init(struct list_lru *lru);
```
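A possible initialisation path, reusing the hypothetical `struct demo_cache` from the earlier sketch:

```c
/* Set up the embedded LRU once when the cache is created. */
static int demo_cache_create(struct demo_cache *cache)
{
        /* list_lru_init() initialises the spinlock, the list head and nr_items. */
        return list_lru_init(&cache->lru);
}
```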
```c
/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep track of whether
 * or not the element already belongs to the list and is allowed to lazily
 * update it. Note however that this is valid for *a* list, not *this* list:
 * if the caller organizes itself in a way that elements can be in more than
 * one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
```
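A rough sketch of how a cache could use the lazy add/del semantics described above; `struct demo_object` and the helper names are hypothetical and build on the `demo_cache` sketch earlier:

```c
/* Hypothetical object type; the lru member is its hook into the list_lru. */
struct demo_object {
        struct list_head        lru;
        /* ... object payload ... */
};

/* Last user went away: lazily park the object on the LRU. */
static void demo_object_unused(struct demo_cache *cache, struct demo_object *obj)
{
        /* No-op (returns false) if obj->lru is already on a list. */
        list_lru_add(&cache->lru, &obj->lru);
}

/* Object looked up again: take it back off the LRU. */
static void demo_object_referenced(struct demo_cache *cache, struct demo_object *obj)
{
        list_lru_del(&cache->lru, &obj->lru);
}
```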
```c
/**
 * list_lru_count: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
static inline unsigned long list_lru_count(struct list_lru *lru)
{
        return lru->nr_items;
}
```
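For a shrinker-style count path an approximate value is usually sufficient, so the unlocked read above can be used directly; a minimal sketch, again with the hypothetical `demo_cache`:

```c
/* Approximate count of reclaimable objects, e.g. for a shrinker count path.
 * May race with concurrent add/del; callers needing an exact value must
 * serialise LRU updates and this read with their own outer lock. */
static unsigned long demo_cache_count(struct demo_cache *cache)
{
        return list_lru_count(&cache->lru);
}
```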
```c
typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @isolate: callback function that is responsible for deciding what to do with
 *           the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan up to @nr_to_walk elements of a particular list_lru,
 * calling the @isolate callback for each of those items, along with the
 * current list spinlock and a caller-provided opaque argument. The @isolate
 * callback can choose to drop the lock internally, but *must* return with the
 * lock held. The callback returns an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
                            void *cb_arg, unsigned long nr_to_walk);
```
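A sketch of an isolate callback and a caller, assuming the LRU_REMOVED contract above (the callback itself takes the item off the list before reporting it removed). It reuses the hypothetical `demo_cache`/`demo_object` from the earlier sketches; `demo_object_busy()` and `demo_object_free()` are hypothetical helpers:

```c
/* Move freeable objects onto a private dispose list passed via cb_arg,
 * keep busy ones on the LRU for another pass. */
static enum lru_status demo_isolate(struct list_head *item, spinlock_t *lock,
                                    void *cb_arg)
{
        struct list_head *dispose = cb_arg;
        struct demo_object *obj = list_entry(item, struct demo_object, lru);

        if (demo_object_busy(obj))      /* hypothetical "still in use" test */
                return LRU_ROTATE;      /* referenced, give it another pass */

        /* The lru lock is held here; take the item off the LRU ourselves
         * and tell the infrastructure it is gone. */
        list_move(item, dispose);
        return LRU_REMOVED;
}

/* Scan up to 128 objects and free whatever was isolated. */
static unsigned long demo_cache_shrink(struct demo_cache *cache)
{
        LIST_HEAD(dispose);
        struct demo_object *obj, *next;
        unsigned long removed;

        removed = list_lru_walk(&cache->lru, demo_isolate, &dispose, 128);

        list_for_each_entry_safe(obj, next, &dispose, lru)
                demo_object_free(obj);  /* hypothetical destructor */

        return removed;
}
```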
```c
typedef void (*list_lru_dispose_cb)(struct list_head *dispose_list);
/**
 * list_lru_dispose_all: forcibly flush all elements in an @lru
 * @lru: the lru pointer
 * @dispose: callback function to be called for each lru list.
 *
 * This function will forcibly isolate all elements into the dispose list, and
 * call the @dispose callback to flush the list. Please note that the callback
 * should expect items in any state, clean or dirty, and be able to flush all
 * of them.
 *
 * Return value: how many objects were freed. It should be equal to the total
 * number of objects in the list_lru.
 */
unsigned long
list_lru_dispose_all(struct list_lru *lru, list_lru_dispose_cb dispose);
#endif /* _LRU_LIST_H */
```
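Finally, a possible teardown path built on the same hypothetical `demo_cache`/`demo_object` types; `demo_object_free()` remains a hypothetical destructor:

```c
/* Dispose callback: free every object that was spliced onto the list,
 * whatever state it is in. */
static void demo_dispose(struct list_head *dispose_list)
{
        struct demo_object *obj, *next;

        list_for_each_entry_safe(obj, next, dispose_list, lru) {
                list_del_init(&obj->lru);
                demo_object_free(obj);
        }
}

/* Typical unmount/destroy path: drain the whole LRU. */
static void demo_cache_destroy(struct demo_cache *cache)
{
        list_lru_dispose_all(&cache->lru, demo_dispose);
}
```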
