diff options
author | Vladimir Davydov <vdavydov@parallels.com> | 2015-02-12 17:59:04 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-12 21:54:09 -0500 |
commit | ff0b67ef5b1687692bc1fd3ce4bc3d1ff83587c7 (patch) | |
tree | 9c4b001b4b814b1cfd90c195fe4b425d3342b19c | |
parent | 05257a1a3dcc196c197714b5c9a8dd35b7f6aefc (diff) |
list_lru: get rid of ->active_nodes
The active_nodes mask allows us to skip empty nodes when walking over
list_lru items from all nodes in list_lru_count/walk. However, these
functions are never called from hot paths, so we do not seem to need
that kind of optimization there. OTOH, removing the mask will make it
easier to make list_lru per-memcg.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/list_lru.h | 5 | ||||
-rw-r--r-- | mm/list_lru.c | 10 |
2 files changed, 5 insertions, 10 deletions
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index f500a2e39b13..53c1d6b78270 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h | |||
@@ -31,7 +31,6 @@ struct list_lru_node { | |||
31 | 31 | ||
32 | struct list_lru { | 32 | struct list_lru { |
33 | struct list_lru_node *node; | 33 | struct list_lru_node *node; |
34 | nodemask_t active_nodes; | ||
35 | }; | 34 | }; |
36 | 35 | ||
37 | void list_lru_destroy(struct list_lru *lru); | 36 | void list_lru_destroy(struct list_lru *lru); |
@@ -94,7 +93,7 @@ static inline unsigned long list_lru_count(struct list_lru *lru) | |||
94 | long count = 0; | 93 | long count = 0; |
95 | int nid; | 94 | int nid; |
96 | 95 | ||
97 | for_each_node_mask(nid, lru->active_nodes) | 96 | for_each_node_state(nid, N_NORMAL_MEMORY) |
98 | count += list_lru_count_node(lru, nid); | 97 | count += list_lru_count_node(lru, nid); |
99 | 98 | ||
100 | return count; | 99 | return count; |
@@ -142,7 +141,7 @@ list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, | |||
142 | long isolated = 0; | 141 | long isolated = 0; |
143 | int nid; | 142 | int nid; |
144 | 143 | ||
145 | for_each_node_mask(nid, lru->active_nodes) { | 144 | for_each_node_state(nid, N_NORMAL_MEMORY) { |
146 | isolated += list_lru_walk_node(lru, nid, isolate, | 145 | isolated += list_lru_walk_node(lru, nid, isolate, |
147 | cb_arg, &nr_to_walk); | 146 | cb_arg, &nr_to_walk); |
148 | if (nr_to_walk <= 0) | 147 | if (nr_to_walk <= 0) |
diff --git a/mm/list_lru.c b/mm/list_lru.c index f1a0db194173..07e198c77888 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
@@ -19,8 +19,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) | |||
19 | WARN_ON_ONCE(nlru->nr_items < 0); | 19 | WARN_ON_ONCE(nlru->nr_items < 0); |
20 | if (list_empty(item)) { | 20 | if (list_empty(item)) { |
21 | list_add_tail(item, &nlru->list); | 21 | list_add_tail(item, &nlru->list); |
22 | if (nlru->nr_items++ == 0) | 22 | nlru->nr_items++; |
23 | node_set(nid, lru->active_nodes); | ||
24 | spin_unlock(&nlru->lock); | 23 | spin_unlock(&nlru->lock); |
25 | return true; | 24 | return true; |
26 | } | 25 | } |
@@ -37,8 +36,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) | |||
37 | spin_lock(&nlru->lock); | 36 | spin_lock(&nlru->lock); |
38 | if (!list_empty(item)) { | 37 | if (!list_empty(item)) { |
39 | list_del_init(item); | 38 | list_del_init(item); |
40 | if (--nlru->nr_items == 0) | 39 | nlru->nr_items--; |
41 | node_clear(nid, lru->active_nodes); | ||
42 | WARN_ON_ONCE(nlru->nr_items < 0); | 40 | WARN_ON_ONCE(nlru->nr_items < 0); |
43 | spin_unlock(&nlru->lock); | 41 | spin_unlock(&nlru->lock); |
44 | return true; | 42 | return true; |
@@ -90,8 +88,7 @@ restart: | |||
90 | case LRU_REMOVED_RETRY: | 88 | case LRU_REMOVED_RETRY: |
91 | assert_spin_locked(&nlru->lock); | 89 | assert_spin_locked(&nlru->lock); |
92 | case LRU_REMOVED: | 90 | case LRU_REMOVED: |
93 | if (--nlru->nr_items == 0) | 91 | nlru->nr_items--; |
94 | node_clear(nid, lru->active_nodes); | ||
95 | WARN_ON_ONCE(nlru->nr_items < 0); | 92 | WARN_ON_ONCE(nlru->nr_items < 0); |
96 | isolated++; | 93 | isolated++; |
97 | /* | 94 | /* |
@@ -133,7 +130,6 @@ int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key) | |||
133 | if (!lru->node) | 130 | if (!lru->node) |
134 | return -ENOMEM; | 131 | return -ENOMEM; |
135 | 132 | ||
136 | nodes_clear(lru->active_nodes); | ||
137 | for (i = 0; i < nr_node_ids; i++) { | 133 | for (i = 0; i < nr_node_ids; i++) { |
138 | spin_lock_init(&lru->node[i].lock); | 134 | spin_lock_init(&lru->node[i].lock); |
139 | if (key) | 135 | if (key) |