author    Vladimir Davydov <vdavydov@parallels.com>  2015-02-12 17:59:04 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-12 21:54:09 -0500
commit    ff0b67ef5b1687692bc1fd3ce4bc3d1ff83587c7 (patch)
tree      9c4b001b4b814b1cfd90c195fe4b425d3342b19c /mm/list_lru.c
parent    05257a1a3dcc196c197714b5c9a8dd35b7f6aefc (diff)
list_lru: get rid of ->active_nodes
The active_nodes mask allows us to skip empty nodes when walking over list_lru items from all nodes in list_lru_count/walk. However, these functions are never called from hot paths, so it does not seem we need that kind of optimization there. OTOH, removing the mask will make it easier to make list_lru per-memcg.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
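For context, the walks that ->active_nodes used to short-circuit are the per-node loops in list_lru_count()/list_lru_walk(), which live in include/linux/list_lru.h and are therefore not visible in this path-filtered view. A minimal sketch of the post-patch count loop, assuming the helpers of this kernel series; before the patch the loop iterated with for_each_node_mask(nid, lru->active_nodes) instead:

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	/*
	 * Visit every node that has normal memory; empty nodes just
	 * contribute zero, which is acceptable because count/walk are
	 * not called from hot paths.
	 */
	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}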
Diffstat (limited to 'mm/list_lru.c')
-rw-r--r--  mm/list_lru.c | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f1a0db194173..07e198c77888 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -19,8 +19,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 	WARN_ON_ONCE(nlru->nr_items < 0);
 	if (list_empty(item)) {
 		list_add_tail(item, &nlru->list);
-		if (nlru->nr_items++ == 0)
-			node_set(nid, lru->active_nodes);
+		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -37,8 +36,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 	spin_lock(&nlru->lock);
 	if (!list_empty(item)) {
 		list_del_init(item);
-		if (--nlru->nr_items == 0)
-			node_clear(nid, lru->active_nodes);
+		nlru->nr_items--;
 		WARN_ON_ONCE(nlru->nr_items < 0);
 		spin_unlock(&nlru->lock);
 		return true;
@@ -90,8 +88,7 @@ restart:
 	case LRU_REMOVED_RETRY:
 		assert_spin_locked(&nlru->lock);
 	case LRU_REMOVED:
-		if (--nlru->nr_items == 0)
-			node_clear(nid, lru->active_nodes);
+		nlru->nr_items--;
 		WARN_ON_ONCE(nlru->nr_items < 0);
 		isolated++;
 		/*
@@ -133,7 +130,6 @@ int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
 	if (!lru->node)
 		return -ENOMEM;
 
-	nodes_clear(lru->active_nodes);
 	for (i = 0; i < nr_node_ids; i++) {
 		spin_lock_init(&lru->node[i].lock);
 		if (key)
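The matching struct change also lives in include/linux/list_lru.h and is not part of this view; a rough sketch of the structures as they stood in this series, with the removed member marked:

struct list_lru_node {
	spinlock_t		lock;
	struct list_head	list;
	/* signed so the WARN_ON_ONCE() checks above can catch underflow */
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
	/* nodemask_t active_nodes;  -- removed by this patch */
};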