author     Christoph Lameter <clameter@sgi.com>                  2007-10-16 04:26:06 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:43:01 -0400
commit     8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f
tree       4f690448c1363bf02f74abd9293126c3e3a9e4c9
parent     dfb4f09609827301740ef0a11b37530d190f1681
SLUB: Do not use page->mapping
After moving the lockless_freelist to kmem_cache_cpu we no longer need
page->lockless_freelist. Restructure the use of the struct page fields in
such a way that we never touch the mapping field.

This in turn allows us to remove the special casing of SLUB when determining
the mapping of a page (needed for corner cases of machines with virtual
caches that must flush the caches of all processors mapping a page).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
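The core of the change is visible in the mm_types.h hunk below: before this
patch, SLUB's anonymous struct placed slab in the second word of the struct
page union, directly on top of mapping, so a slab page carried a kmem_cache
pointer where the rest of the VM expected an address_space pointer. A
self-contained user-space sketch of that overlay (the toy structs are
illustrative stand-ins, not the real struct page):

	#include <stdio.h>
	#include <stddef.h>

	struct address_space;		/* stand-ins for the kernel types */
	struct kmem_cache;

	struct toy_page_before {	/* union layout before this patch */
		union {
			struct {
				unsigned long private;
				struct address_space *mapping;
			};
			struct {			/* SLUB uses */
				void **lockless_freelist; /* overlays private */
				struct kmem_cache *slab;  /* overlays mapping! */
			};
		};
	};

	struct toy_page_after {		/* union layout after this patch */
		union {
			struct {
				unsigned long private;
				struct address_space *mapping;
			};
			struct kmem_cache *slab;	/* overlays private only */
		};
	};

	int main(void)
	{
		/* Before: slab and mapping share storage, so page_mapping()
		 * had to special-case PageSlab() pages. */
		printf("before: slab=%zu mapping=%zu\n",
		       offsetof(struct toy_page_before, slab),
		       offsetof(struct toy_page_before, mapping));
		/* After: slab shares storage only with private; SLUB never
		 * writes the mapping word, so no special case is needed. */
		printf("after:  slab=%zu mapping=%zu\n",
		       offsetof(struct toy_page_after, slab),
		       offsetof(struct toy_page_after, mapping));
		return 0;
	}

On a 64-bit build this prints slab at offset 8 (aliasing mapping) before the
patch and offset 0 (aliasing private) after it.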
-rw-r--r--  include/linux/mm.h        | 4
-rw-r--r--  include/linux/mm_types.h  | 9
-rw-r--r--  mm/slub.c                 | 2
3 files changed, 2 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6a68d41444f8..292c68623759 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -568,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
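Why a stray value in mapping mattered: architectures with virtually indexed
caches consult page_mapping() when flushing user-visible aliases. A hedged
sketch of such a consumer, loosely modeled on the arm flush_dcache_page() of
this era (the exact body is illustrative; mapping_mapped() and the PG_arch_1
deferred-flush trick are the real kernel idioms):

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Illustrative sketch of an arch-side consumer of page_mapping(). */
	void flush_dcache_page(struct page *page)
	{
		struct address_space *mapping = page_mapping(page);

		/*
		 * With the old union layout, a page reaching a path like this
		 * would have seen page->slab through the mapping field unless
		 * page_mapping() special-cased PageSlab(); after this patch
		 * SLUB never writes that word, so the branch can go.
		 */
		if (mapping && !mapping_mapped(mapping))
			set_bit(PG_arch_1, &page->flags);	/* defer flush */
		else
			__flush_dcache_page(mapping, page);	/* arch helper */
	}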
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 145b3d053048..0cdc8fbf6431 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -62,13 +62,8 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-		struct {			/* SLUB uses */
-			void **lockless_freelist;
-			struct kmem_cache *slab;	/* Pointer to slab */
-		};
-		struct {
-			struct page *first_page;	/* Compound pages */
-		};
+		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct page *first_page;	/* Compound tail pages */
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
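The lockless_freelist removed here did not simply disappear: the parent
commit (dfb4f096...) moved it into the per-cpu allocation structure, which is
what frees up struct page. From memory of the 2.6.24-era
include/linux/slub_def.h, the receiving structure looked roughly like this
(field details may differ):

	/* Sketch of the per-cpu structure that absorbed the freelist. */
	struct kmem_cache_cpu {
		void **freelist;	/* was page->lockless_freelist */
		struct page *page;	/* slab page we allocate from */
		int node;		/* node of that page (-1 for debug) */
	};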
diff --git a/mm/slub.c b/mm/slub.c
index 4b8037f14fce..aa8bb072651b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1127,7 +1127,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
-	page->lockless_freelist = NULL;
 	page->inuse = 0;
 out:
 	if (flags & __GFP_WAIT)
@@ -1153,7 +1152,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		- pages);
 
-	page->mapping = NULL;
 	__free_pages(page, s->order);
 }
 