author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 18:01:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 18:01:38 -0400
commit    26935fb06ee88f1188789807687c03041f3c70d9 (patch)
tree      381c487716540b52348d78bee6555f8fa61d77ef /include/linux/list_lru.h
parent    3cc69b638e11bfda5d013c2b75b60934aa0e88a1 (diff)
parent    bf2ba3bc185269eca274b458aac46ba1ad7c1121 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 4 from Al Viro:
"list_lru pile, mostly"
This came out of Andrew's pile; Al ended up doing the merge work so that
Andrew didn't have to.
Additionally, a few fixes.
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (42 commits)
super: fix for destroy lrus
list_lru: dynamically adjust node arrays
shrinker: Kill old ->shrink API.
shrinker: convert remaining shrinkers to count/scan API
staging/lustre/libcfs: cleanup linux-mem.h
staging/lustre/ptlrpc: convert to new shrinker API
staging/lustre/obdclass: convert lu_object shrinker to count/scan API
staging/lustre/ldlm: convert to shrinkers to count/scan API
hugepage: convert huge zero page shrinker to new shrinker API
i915: bail out earlier when shrinker cannot acquire mutex
drivers: convert shrinkers to new count/scan API
fs: convert fs shrinkers to new scan/count API
xfs: fix dquot isolation hang
xfs-convert-dquot-cache-lru-to-list_lru-fix
xfs: convert dquot cache lru to list_lru
xfs: rework buffer dispose list tracking
xfs-convert-buftarg-lru-to-generic-code-fix
xfs: convert buftarg LRU to generic code
fs: convert inode and dentry shrinking to be node aware
vmscan: per-node deferred work
...
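
Most of the series above replaces the old single ->shrink() callback with the split count/scan shrinker API: count_objects() only reports how many objects could be freed, while scan_objects() does the actual freeing and may return SHRINK_STOP when it cannot make progress. As a rough, hypothetical sketch of the shape a converted shrinker takes (the my_cache_* names and the atomic counter are illustrative, not part of this series):

#include <linux/atomic.h>
#include <linux/shrinker.h>

static atomic_long_t my_cache_nr_objects;       /* hypothetical object counter */

static unsigned long my_cache_count(struct shrinker *s,
                                    struct shrink_control *sc)
{
        /* Report how many objects could be freed; free nothing here. */
        return atomic_long_read(&my_cache_nr_objects);
}

static unsigned long my_cache_scan(struct shrinker *s,
                                   struct shrink_control *sc)
{
        unsigned long freed = 0;

        /* ... free up to sc->nr_to_scan objects, counting them in freed ... */

        /* Tell vmscan to back off if no progress could be made. */
        return freed ? freed : SHRINK_STOP;
}

static struct shrinker my_cache_shrinker = {
        .count_objects  = my_cache_count,
        .scan_objects   = my_cache_scan,
        .seeks          = DEFAULT_SEEKS,
};

Such a shrinker is still registered with register_shrinker(); NUMA-aware shrinkers additionally set SHRINKER_NUMA_AWARE and use sc->nid to confine themselves to one node, which is where the per-node list_lru introduced below comes in.
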
Diffstat (limited to 'include/linux/list_lru.h')
-rw-r--r--  include/linux/list_lru.h  131
1 file changed, 131 insertions(+), 0 deletions(-)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
new file mode 100644
index 000000000000..3ce541753c88
--- /dev/null
+++ b/include/linux/list_lru.h
@@ -0,0 +1,131 @@
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>

/* list_lru_walk_cb must always return one of these */
enum lru_status {
        LRU_REMOVED,            /* item removed from list */
        LRU_ROTATE,             /* item referenced, give another pass */
        LRU_SKIP,               /* item cannot be locked, skip */
        LRU_RETRY,              /* item not freeable. May drop the lock
                                   internally, but has to return locked. */
};

struct list_lru_node {
        spinlock_t              lock;
        struct list_head        list;
        /* kept as signed so we can catch imbalance bugs */
        long                    nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
        struct list_lru_node    *node;
        nodemask_t              active_nodes;
};

void list_lru_destroy(struct list_lru *lru);
int list_lru_init(struct list_lru *lru);

/**
 * list_lru_add: add an element to the lru list's tail
 * @list_lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to keep track of
 * whether or not the element is already on the list and may update it
 * lazily. Note however that this holds for *a* list, not *this* list: if
 * the caller keeps elements on more than one type of list, it is up to the
 * caller to fully remove the item from the previous list (with
 * list_lru_del() for instance) before moving it to @list_lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @list_lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add in terms of list
 * manipulation. The comments about an element already belonging to
 * a list also apply to list_lru_del.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_count_node: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
static inline unsigned long list_lru_count(struct list_lru *lru)
{
        long count = 0;
        int nid;

        for_each_node_mask(nid, lru->active_nodes)
                count += list_lru_count_node(lru, nid);

        return count;
}

typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 * the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
              void *cb_arg, unsigned long nr_to_walk)
{
        long isolated = 0;
        int nid;

        for_each_node_mask(nid, lru->active_nodes) {
                isolated += list_lru_walk_node(lru, nid, isolate,
                                               cb_arg, &nr_to_walk);
                if (nr_to_walk <= 0)
                        break;
        }
        return isolated;
}
#endif /* _LRU_LIST_H */
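
To illustrate how the pieces of this interface fit together, a minimal, hypothetical user of list_lru might look like the sketch below (my_object, my_lru, my_isolate and my_prune are made-up names, not part of this patch): objects sit on the LRU while unused, and reclaim walks the LRU with an isolate callback that moves victims to a private dispose list.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_object {
        struct list_head lru;           /* linkage onto my_lru */
        /* ... payload ... */
};

static struct list_lru my_lru;          /* set up once with list_lru_init() */

/* Called with the per-node lru lock held; must return with it still held. */
static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
                                  void *cb_arg)
{
        struct my_object *obj = container_of(item, struct my_object, lru);
        struct list_head *dispose = cb_arg;

        /* A busy object would return LRU_SKIP or LRU_ROTATE instead. */
        list_move(&obj->lru, dispose);
        return LRU_REMOVED;
}

/* Scan up to nr_to_walk objects and free whatever the walk isolated. */
static unsigned long my_prune(unsigned long nr_to_walk)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        freed = list_lru_walk(&my_lru, my_isolate, &dispose, nr_to_walk);

        while (!list_empty(&dispose)) {
                struct my_object *obj;

                obj = list_first_entry(&dispose, struct my_object, lru);
                list_del_init(&obj->lru);
                kfree(obj);
        }
        return freed;
}

In this sketch an object would be placed on the LRU with list_lru_add(&my_lru, &obj->lru) once it becomes unused and taken off with list_lru_del() when it is referenced again; both return false if the item was already in the requested state, so the caller does not have to track membership itself.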