author		Dave Hansen <dave.hansen@linux.intel.com>	2014-04-08 16:44:27 -0400
committer	Pekka Enberg <penberg@kernel.org>	2014-04-11 03:06:06 -0400
commit		34bf6ef94a835a8f1d8abd3e7d38c6c08d205867 (patch)
tree		a5e285e441036ed1d78033192b7eaf74300f4984
parent		5f0985bb1123b48bbfc632006bdbe76d3dfea76b (diff)
mm: slab/slub: use page->lru consistently instead of page->list
'struct page' has two list_head fields: 'lru' and 'list'.  Conveniently,
they are unioned together.  This means that code can use them
interchangeably, which gets horribly confusing like with this nugget from
slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab and slub code use page->lru universally instead
of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep your
page on a list.  Don't like the fact that it's not called ->list?  Too bad.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
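The quirk is easier to see in isolation.  Below is a minimal userspace
sketch, not the kernel's real struct page or list.h; the struct, helpers,
and main() are stand-ins invented for illustration.  Because the two
list_head members share a union, linking a page through one name and
unlinking it through the other compiles and runs, which is exactly the
mixed usage this patch removes.

	/*
	 * Illustrative sketch only: two list_head members in a union alias
	 * the same storage, so a node can be linked with one name and
	 * unlinked with the other.  It works, but reads as if two
	 * different lists were involved.
	 */
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void list_init(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	struct fake_page {
		union {
			struct list_head lru;	/* name the patch standardizes on */
			struct list_head list;	/* removed alias: same bytes as ->lru */
		};
	};

	int main(void)
	{
		struct list_head full;
		struct fake_page page;

		list_init(&full);
		list_add(&page.list, &full);	/* linked via one alias ...          */
		list_del(&page.lru);		/* ... unlinked via the other: legal */
						/* because both name the same bytes, */
						/* but needlessly confusing to read. */
		printf("list empty again: %d\n", full.next == &full);
		return 0;
	}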
-rw-r--r--	include/linux/mm_types.h	3
-rw-r--r--	mm/slab.c	4
-rw-r--r--	mm/slob.c	10
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 290901a8c1de..84b74080beb7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 	};
 
-	struct list_head list;		/* slobs list of pages */
 	struct slab *slab_page;		/* slab fields */
 	struct rcu_head rcu_head;	/* Used by SLAB
 					 * when destroying via RCU
diff --git a/mm/slab.c b/mm/slab.c
index 8dd8e0875e4c..f6718197cdd0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2922,9 +2922,9 @@ retry:
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}
 
 must_grow:
diff --git a/mm/slob.c b/mm/slob.c
index 4bf8809dfcce..730cad45d4be 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }
 
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;
 
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
 		sp->freelist = b;
-		INIT_LIST_HEAD(&sp->list);
+		INIT_LIST_HEAD(&sp->lru);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);