diff options
author | Christoph Lameter <clameter@sgi.com> | 2007-05-06 17:49:39 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 15:12:53 -0400 |
commit | d85f33855c303acfa87fa457157cef755b6087df (patch) | |
tree | f1184a1a24b432727b0399594ede37c7539db888 /mm | |
parent | 30520864839dc796fd314812e7036e754880b47d (diff) |
Make page->private usable in compound pages
If we add a new flag so that we can distinguish between the first page and the
tail pages then we can avoid using page->private in the first page.
page->private == page for the first page, so there is no real information in
there.
Freeing up page->private makes the use of compound pages more transparent.
They become more usable, like real pages. Right now we have to be careful, e.g.,
if we are going beyond PAGE_SIZE allocations in the slab on i386 because we
can then no longer use the private field. This is one of the issues that
cause us not to support debugging for page size slabs in SLAB.
Having page->private available for SLUB would allow more meta information in
the page struct. I can probably avoid the 16 bit ints that I have in there
right now.
Also if page->private is available then a compound page may be equipped with
buffer heads. This may free up the way for filesystems to support larger
blocks than page size.
We add PageTail as an alias of PageReclaim. Compound pages cannot currently
be reclaimed. Because of the alias one needs to check PageCompound first.
The RFC for this approach was discussed at
http://marc.info/?t=117574302800001&r=1&w=2
[nacc@us.ibm.com: fix hugetlbfs]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/internal.h | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 29 | ||||
-rw-r--r-- | mm/slab.c | 6 | ||||
-rw-r--r-- | mm/slub.c | 19 | ||||
-rw-r--r-- | mm/swap.c | 2 |
5 files changed, 28 insertions, 30 deletions
diff --git a/mm/internal.h b/mm/internal.h index d527b80b292f..a3110c02aea7 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
@@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v) | |||
24 | */ | 24 | */ |
25 | static inline void set_page_refcounted(struct page *page) | 25 | static inline void set_page_refcounted(struct page *page) |
26 | { | 26 | { |
27 | VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page); | 27 | VM_BUG_ON(PageCompound(page) && PageTail(page)); |
28 | VM_BUG_ON(atomic_read(&page->_count)); | 28 | VM_BUG_ON(atomic_read(&page->_count)); |
29 | set_page_count(page, 1); | 29 | set_page_count(page, 1); |
30 | } | 30 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 542fc088ff50..fc241fe295ab 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -225,7 +225,7 @@ static void bad_page(struct page *page) | |||
225 | 225 | ||
226 | static void free_compound_page(struct page *page) | 226 | static void free_compound_page(struct page *page) |
227 | { | 227 | { |
228 | __free_pages_ok(page, (unsigned long)page[1].lru.prev); | 228 | __free_pages_ok(page, compound_order(page)); |
229 | } | 229 | } |
230 | 230 | ||
231 | static void prep_compound_page(struct page *page, unsigned long order) | 231 | static void prep_compound_page(struct page *page, unsigned long order) |
@@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order) | |||
234 | int nr_pages = 1 << order; | 234 | int nr_pages = 1 << order; |
235 | 235 | ||
236 | set_compound_page_dtor(page, free_compound_page); | 236 | set_compound_page_dtor(page, free_compound_page); |
237 | page[1].lru.prev = (void *)order; | 237 | set_compound_order(page, order); |
238 | for (i = 0; i < nr_pages; i++) { | 238 | __SetPageCompound(page); |
239 | for (i = 1; i < nr_pages; i++) { | ||
239 | struct page *p = page + i; | 240 | struct page *p = page + i; |
240 | 241 | ||
242 | __SetPageTail(p); | ||
241 | __SetPageCompound(p); | 243 | __SetPageCompound(p); |
242 | set_page_private(p, (unsigned long)page); | 244 | p->first_page = page; |
243 | } | 245 | } |
244 | } | 246 | } |
245 | 247 | ||
@@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order) | |||
248 | int i; | 250 | int i; |
249 | int nr_pages = 1 << order; | 251 | int nr_pages = 1 << order; |
250 | 252 | ||
251 | if (unlikely((unsigned long)page[1].lru.prev != order)) | 253 | if (unlikely(compound_order(page) != order)) |
252 | bad_page(page); | 254 | bad_page(page); |
253 | 255 | ||
254 | for (i = 0; i < nr_pages; i++) { | 256 | if (unlikely(!PageCompound(page))) |
257 | bad_page(page); | ||
258 | __ClearPageCompound(page); | ||
259 | for (i = 1; i < nr_pages; i++) { | ||
255 | struct page *p = page + i; | 260 | struct page *p = page + i; |
256 | 261 | ||
257 | if (unlikely(!PageCompound(p) | | 262 | if (unlikely(!PageCompound(p) | !PageTail(p) | |
258 | (page_private(p) != (unsigned long)page))) | 263 | (p->first_page != page))) |
259 | bad_page(page); | 264 | bad_page(page); |
265 | __ClearPageTail(p); | ||
260 | __ClearPageCompound(p); | 266 | __ClearPageCompound(p); |
261 | } | 267 | } |
262 | } | 268 | } |
@@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page) | |||
429 | 1 << PG_private | | 435 | 1 << PG_private | |
430 | 1 << PG_locked | | 436 | 1 << PG_locked | |
431 | 1 << PG_active | | 437 | 1 << PG_active | |
432 | 1 << PG_reclaim | | ||
433 | 1 << PG_slab | | 438 | 1 << PG_slab | |
434 | 1 << PG_swapcache | | 439 | 1 << PG_swapcache | |
435 | 1 << PG_writeback | | 440 | 1 << PG_writeback | |
436 | 1 << PG_reserved | | 441 | 1 << PG_reserved | |
437 | 1 << PG_buddy )))) | 442 | 1 << PG_buddy )))) |
438 | bad_page(page); | 443 | bad_page(page); |
444 | /* | ||
445 | * PageReclaim == PageTail. It is only an error | ||
446 | * for PageReclaim to be set if PageCompound is clear. | ||
447 | */ | ||
448 | if (unlikely(!PageCompound(page) && PageReclaim(page))) | ||
449 | bad_page(page); | ||
439 | if (PageDirty(page)) | 450 | if (PageDirty(page)) |
440 | __ClearPageDirty(page); | 451 | __ClearPageDirty(page); |
441 | /* | 452 | /* |
@@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache) | |||
602 | 602 | ||
603 | static inline struct kmem_cache *page_get_cache(struct page *page) | 603 | static inline struct kmem_cache *page_get_cache(struct page *page) |
604 | { | 604 | { |
605 | if (unlikely(PageCompound(page))) | 605 | page = compound_head(page); |
606 | page = (struct page *)page_private(page); | ||
607 | BUG_ON(!PageSlab(page)); | 606 | BUG_ON(!PageSlab(page)); |
608 | return (struct kmem_cache *)page->lru.next; | 607 | return (struct kmem_cache *)page->lru.next; |
609 | } | 608 | } |
@@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab) | |||
615 | 614 | ||
616 | static inline struct slab *page_get_slab(struct page *page) | 615 | static inline struct slab *page_get_slab(struct page *page) |
617 | { | 616 | { |
618 | if (unlikely(PageCompound(page))) | 617 | page = compound_head(page); |
619 | page = (struct page *)page_private(page); | ||
620 | BUG_ON(!PageSlab(page)); | 618 | BUG_ON(!PageSlab(page)); |
621 | return (struct slab *)page->lru.prev; | 619 | return (struct slab *)page->lru.prev; |
622 | } | 620 | } |
@@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
1325 | 1325 | ||
1326 | page = virt_to_page(x); | 1326 | page = virt_to_page(x); |
1327 | 1327 | ||
1328 | if (unlikely(PageCompound(page))) | 1328 | page = compound_head(page); |
1329 | page = page->first_page; | ||
1330 | |||
1331 | 1329 | ||
1332 | if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) | 1330 | if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER))) |
1333 | set_tracking(s, x, TRACK_FREE); | 1331 | set_tracking(s, x, TRACK_FREE); |
@@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free); | |||
1338 | /* Figure out on which slab object the object resides */ | 1336 | /* Figure out on which slab object the object resides */ |
1339 | static struct page *get_object_page(const void *x) | 1337 | static struct page *get_object_page(const void *x) |
1340 | { | 1338 | { |
1341 | struct page *page = virt_to_page(x); | 1339 | struct page *page = compound_head(virt_to_page(x)); |
1342 | |||
1343 | if (unlikely(PageCompound(page))) | ||
1344 | page = page->first_page; | ||
1345 | 1340 | ||
1346 | if (!PageSlab(page)) | 1341 | if (!PageSlab(page)) |
1347 | return NULL; | 1342 | return NULL; |
@@ -2081,10 +2076,7 @@ void kfree(const void *x) | |||
2081 | if (!x) | 2076 | if (!x) |
2082 | return; | 2077 | return; |
2083 | 2078 | ||
2084 | page = virt_to_page(x); | 2079 | page = compound_head(virt_to_page(x)); |
2085 | |||
2086 | if (unlikely(PageCompound(page))) | ||
2087 | page = page->first_page; | ||
2088 | 2080 | ||
2089 | s = page->slab; | 2081 | s = page->slab; |
2090 | 2082 | ||
@@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) | |||
2120 | return NULL; | 2112 | return NULL; |
2121 | } | 2113 | } |
2122 | 2114 | ||
2123 | page = virt_to_page(p); | 2115 | page = compound_head(virt_to_page(p)); |
2124 | |||
2125 | if (unlikely(PageCompound(page))) | ||
2126 | page = page->first_page; | ||
2127 | 2116 | ||
2128 | new_cache = get_slab(new_size, flags); | 2117 | new_cache = get_slab(new_size, flags); |
2129 | 2118 | ||
@@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page) | |||
55 | 55 | ||
56 | static void put_compound_page(struct page *page) | 56 | static void put_compound_page(struct page *page) |
57 | { | 57 | { |
58 | page = (struct page *)page_private(page); | 58 | page = compound_head(page); |
59 | if (put_page_testzero(page)) { | 59 | if (put_page_testzero(page)) { |
60 | compound_page_dtor *dtor; | 60 | compound_page_dtor *dtor; |
61 | 61 | ||