aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-05-06 17:49:39 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-07 15:12:53 -0400
commitd85f33855c303acfa87fa457157cef755b6087df (patch)
treef1184a1a24b432727b0399594ede37c7539db888 /mm/slub.c
parent30520864839dc796fd314812e7036e754880b47d (diff)
Make page->private usable in compound pages
If we add a new flag so that we can distinguish between the first page and the tail pages then we can avoid to use page->private in the first page. page->private == page for the first page, so there is no real information in there. Freeing up page->private makes the use of compound pages more transparent. They become more usable like real pages. Right now we have to be careful f.e. if we are going beyond PAGE_SIZE allocations in the slab on i386 because we can then no longer use the private field. This is one of the issues that cause us not to support debugging for page size slabs in SLAB. Having page->private available for SLUB would allow more meta information in the page struct. I can probably avoid the 16 bit ints that I have in there right now. Also if page->private is available then a compound page may be equipped with buffer heads. This may free up the way for filesystems to support larger blocks than page size. We add PageTail as an alias of PageReclaim. Compound pages cannot currently be reclaimed. Because of the alias one needs to check PageCompound first. The RFC for this approach was discussed at http://marc.info/?t=117574302800001&r=1&w=2 [nacc@us.ibm.com: fix hugetlbfs] Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c19
1 files changed, 4 insertions, 15 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 9d52cce7c999..8fa1c6e937f5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
1325 1325
1326 page = virt_to_page(x); 1326 page = virt_to_page(x);
1327 1327
1328 if (unlikely(PageCompound(page))) 1328 page = compound_head(page);
1329 page = page->first_page;
1330
1331 1329
1332 if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) 1330 if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
1333 set_tracking(s, x, TRACK_FREE); 1331 set_tracking(s, x, TRACK_FREE);
@@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free);
1338/* Figure out on which slab object the object resides */ 1336/* Figure out on which slab object the object resides */
1339static struct page *get_object_page(const void *x) 1337static struct page *get_object_page(const void *x)
1340{ 1338{
1341 struct page *page = virt_to_page(x); 1339 struct page *page = compound_head(virt_to_page(x));
1342
1343 if (unlikely(PageCompound(page)))
1344 page = page->first_page;
1345 1340
1346 if (!PageSlab(page)) 1341 if (!PageSlab(page))
1347 return NULL; 1342 return NULL;
@@ -2081,10 +2076,7 @@ void kfree(const void *x)
2081 if (!x) 2076 if (!x)
2082 return; 2077 return;
2083 2078
2084 page = virt_to_page(x); 2079 page = compound_head(virt_to_page(x));
2085
2086 if (unlikely(PageCompound(page)))
2087 page = page->first_page;
2088 2080
2089 s = page->slab; 2081 s = page->slab;
2090 2082
@@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
2120 return NULL; 2112 return NULL;
2121 } 2113 }
2122 2114
2123 page = virt_to_page(p); 2115 page = compound_head(virt_to_page(p));
2124
2125 if (unlikely(PageCompound(page)))
2126 page = page->first_page;
2127 2116
2128 new_cache = get_slab(new_size, flags); 2117 new_cache = get_slab(new_size, flags);
2129 2118