path: root/mm/list_lru.c
author	Dave Chinner <dchinner@redhat.com>	2013-08-27 20:18:00 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:30 -0400
commit	3b1d58a4c96799eb4c92039e1b851b86f853548a (patch)
tree	3d72b6c0506c0a5138ef44dec8ab5c02fd5b29ba /mm/list_lru.c
parent	f604156751db77e08afe47ce29fe8f3d51ad9b04 (diff)
list_lru: per-node list infrastructure
Now that we have an LRU list API, we can start to enhance the implementation. This splits the single LRU list into per-node lists and locks to enhance scalability. Items are placed on lists according to the node the memory belongs to. To make scanning the lists efficient, we also track whether the per-node lists have entries in them in an active nodemask.

Note: we use a fixed-size array for the node LRU, so this struct can be very big if MAX_NUMNODES is big. If this becomes a problem, it is fixable by turning the array into a pointer and dynamically allocating it to nr_node_ids entries. That quantity is firmware-provided, and would still provide room for all nodes at the cost of a pointer lookup and an extra allocation. Because that allocation will most likely come from a different slab cache than the main structure holder, we may very well fail.

[glommer@openvz.org: fix warnings, added note about node lru]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
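For context, the per-node infrastructure described above is carried by two structures this patch adds to include/linux/list_lru.h; a minimal sketch (field comments are mine, layout follows the patch):

struct list_lru_node {
	spinlock_t		lock;
	struct list_head	list;
	/* kept signed so the WARN_ON_ONCE() checks below can catch imbalance */
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	/* fixed-size per-node array; see the MAX_NUMNODES note above */
	struct list_lru_node	node[MAX_NUMNODES];
	/* nodes with non-empty lists, so walkers can skip the rest */
	nodemask_t		active_nodes;
};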
Diffstat (limited to 'mm/list_lru.c')
-rw-r--r--	mm/list_lru.c	146
1 file changed, 111 insertions(+), 35 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index dd74c5434cd8..1efe4ecc02b1 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -6,41 +6,73 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/list_lru.h>
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
-	spin_lock(&lru->lock);
+	int nid = page_to_nid(virt_to_page(item));
+	struct list_lru_node *nlru = &lru->node[nid];
+
+	spin_lock(&nlru->lock);
+	WARN_ON_ONCE(nlru->nr_items < 0);
 	if (list_empty(item)) {
-		list_add_tail(item, &lru->list);
-		lru->nr_items++;
-		spin_unlock(&lru->lock);
+		list_add_tail(item, &nlru->list);
+		if (nlru->nr_items++ == 0)
+			node_set(nid, lru->active_nodes);
+		spin_unlock(&nlru->lock);
 		return true;
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&nlru->lock);
 	return false;
 }
 EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_del(struct list_lru *lru, struct list_head *item)
 {
-	spin_lock(&lru->lock);
+	int nid = page_to_nid(virt_to_page(item));
+	struct list_lru_node *nlru = &lru->node[nid];
+
+	spin_lock(&nlru->lock);
 	if (!list_empty(item)) {
 		list_del_init(item);
-		lru->nr_items--;
-		spin_unlock(&lru->lock);
+		if (--nlru->nr_items == 0)
+			node_clear(nid, lru->active_nodes);
+		WARN_ON_ONCE(nlru->nr_items < 0);
+		spin_unlock(&nlru->lock);
 		return true;
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&nlru->lock);
 	return false;
 }
 EXPORT_SYMBOL_GPL(list_lru_del);
 
-unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
-			    void *cb_arg, unsigned long nr_to_walk)
+unsigned long list_lru_count(struct list_lru *lru)
 {
+	unsigned long count = 0;
+	int nid;
+
+	for_each_node_mask(nid, lru->active_nodes) {
+		struct list_lru_node *nlru = &lru->node[nid];
+
+		spin_lock(&nlru->lock);
+		WARN_ON_ONCE(nlru->nr_items < 0);
+		count += nlru->nr_items;
+		spin_unlock(&nlru->lock);
+	}
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(list_lru_count);
+
+static unsigned long
+list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
+		   void *cb_arg, unsigned long *nr_to_walk)
+{
+
+	struct list_lru_node *nlru = &lru->node[nid];
 	struct list_head *item, *n;
-	unsigned long removed = 0;
+	unsigned long isolated = 0;
 	/*
 	 * If we don't keep state of at which pass we are, we can loop at
 	 * LRU_RETRY, since we have no guarantees that the caller will be able
@@ -50,18 +82,20 @@ unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 	 */
 	bool first_pass = true;
 
-	spin_lock(&lru->lock);
+	spin_lock(&nlru->lock);
 restart:
-	list_for_each_safe(item, n, &lru->list) {
+	list_for_each_safe(item, n, &nlru->list) {
 		enum lru_status ret;
-		ret = isolate(item, &lru->lock, cb_arg);
+		ret = isolate(item, &nlru->lock, cb_arg);
 		switch (ret) {
 		case LRU_REMOVED:
-			lru->nr_items--;
-			removed++;
+			if (--nlru->nr_items == 0)
+				node_clear(nid, lru->active_nodes);
+			WARN_ON_ONCE(nlru->nr_items < 0);
+			isolated++;
 			break;
 		case LRU_ROTATE:
-			list_move_tail(item, &lru->list);
+			list_move_tail(item, &nlru->list);
 			break;
 		case LRU_SKIP:
 			break;
@@ -76,42 +110,84 @@ restart:
 			BUG();
 		}
 
-		if (nr_to_walk-- == 0)
+		if ((*nr_to_walk)-- == 0)
 			break;
 
 	}
-	spin_unlock(&lru->lock);
-	return removed;
+
+	spin_unlock(&nlru->lock);
+	return isolated;
+}
+EXPORT_SYMBOL_GPL(list_lru_walk_node);
+
+unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+			    void *cb_arg, unsigned long nr_to_walk)
+{
+	unsigned long isolated = 0;
+	int nid;
+
+	for_each_node_mask(nid, lru->active_nodes) {
+		isolated += list_lru_walk_node(lru, nid, isolate,
+					       cb_arg, &nr_to_walk);
+		if (nr_to_walk <= 0)
+			break;
+	}
+	return isolated;
 }
 EXPORT_SYMBOL_GPL(list_lru_walk);
 
-unsigned long list_lru_dispose_all(struct list_lru *lru,
-				   list_lru_dispose_cb dispose)
+static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
+					       list_lru_dispose_cb dispose)
 {
-	unsigned long disposed = 0;
+	struct list_lru_node *nlru = &lru->node[nid];
 	LIST_HEAD(dispose_list);
+	unsigned long disposed = 0;
 
-	spin_lock(&lru->lock);
-	while (!list_empty(&lru->list)) {
-		list_splice_init(&lru->list, &dispose_list);
-		disposed += lru->nr_items;
-		lru->nr_items = 0;
-		spin_unlock(&lru->lock);
+	spin_lock(&nlru->lock);
+	while (!list_empty(&nlru->list)) {
+		list_splice_init(&nlru->list, &dispose_list);
+		disposed += nlru->nr_items;
+		nlru->nr_items = 0;
+		node_clear(nid, lru->active_nodes);
+		spin_unlock(&nlru->lock);
 
 		dispose(&dispose_list);
 
-		spin_lock(&lru->lock);
+		spin_lock(&nlru->lock);
 	}
-	spin_unlock(&lru->lock);
+	spin_unlock(&nlru->lock);
 	return disposed;
 }
 
+unsigned long list_lru_dispose_all(struct list_lru *lru,
+				   list_lru_dispose_cb dispose)
+{
+	unsigned long disposed;
+	unsigned long total = 0;
+	int nid;
+
+	do {
+		disposed = 0;
+		for_each_node_mask(nid, lru->active_nodes) {
+			disposed += list_lru_dispose_all_node(lru, nid,
+							      dispose);
+		}
+		total += disposed;
+	} while (disposed != 0);
+
+	return total;
+}
+
 int list_lru_init(struct list_lru *lru)
 {
-	spin_lock_init(&lru->lock);
-	INIT_LIST_HEAD(&lru->list);
-	lru->nr_items = 0;
+	int i;
 
+	nodes_clear(lru->active_nodes);
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		spin_lock_init(&lru->node[i].lock);
+		INIT_LIST_HEAD(&lru->node[i].list);
+		lru->node[i].nr_items = 0;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init);
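A typical user of this API supplies a list_lru_walk_cb that decides the fate of each item while the per-node lock is held. A hypothetical sketch for illustration only (my_object, my_isolate, my_shrink, and the "freeable" list are invented; the callback signature and the LRU_REMOVED semantics follow the patch above):

#include <linux/list_lru.h>

struct my_object {
	struct list_head lru;		/* linked into the list_lru */
	/* ... payload ... */
};

static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
				  void *cb_arg)
{
	struct list_head *freeable = cb_arg;

	/*
	 * Called with the per-node lock held. Taking the item off the
	 * LRU and returning LRU_REMOVED lets the walker decrement
	 * nr_items (and clear the node's bit when it reaches zero).
	 */
	list_move(item, freeable);
	return LRU_REMOVED;
}

static unsigned long my_shrink(struct list_lru *lru)
{
	LIST_HEAD(freeable);
	unsigned long nr;

	/* Walk at most 128 items, skipping nodes with empty lists. */
	nr = list_lru_walk(lru, my_isolate, &freeable, 128);

	/* ... free everything left on the private "freeable" list ... */
	return nr;
}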