-rw-r--r--  arch/ia64/mm/init.c        |  2
-rw-r--r--  fs/hugetlbfs/inode.c       |  2
-rw-r--r--  include/linux/mm.h         | 33
-rw-r--r--  include/linux/page-flags.h | 14
-rw-r--r--  mm/internal.h              |  2
-rw-r--r--  mm/page_alloc.c            | 29
-rw-r--r--  mm/slab.c                  |  6
-rw-r--r--  mm/slub.c                  | 19
-rw-r--r--  mm/swap.c                  |  2
9 files changed, 72 insertions, 37 deletions
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 4f36987eea72..2da841110727 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -121,7 +121,7 @@ lazy_mmu_prot_update (pte_t pte)
 		return;				/* i-cache is already coherent with d-cache */
 
 	if (PageCompound(page)) {
-		order = (unsigned long) (page[1].lru.prev);
+		order = compound_order(page);
 		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
 	}
 	else
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9ba71b252f3e..8e1b7825e2f3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -450,7 +450,7 @@ static int hugetlbfs_symlink(struct inode *dir,
  */
 static int hugetlbfs_set_page_dirty(struct page *page)
 {
-	struct page *head = (struct page *)page_private(page);
+	struct page *head = compound_head(page);
 
 	SetPageDirty(head);
 	return 0;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c95d96ebd5ad..8c149fa4491d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -267,17 +267,28 @@ static inline int get_page_unless_zero(struct page *page)
 	return atomic_inc_not_zero(&page->_count);
 }
 
+static inline struct page *compound_head(struct page *page)
+{
+	/*
+	 * We could avoid the PageCompound(page) check if
+	 * we would not overload PageTail().
+	 *
+	 * This check has to be done in several performance critical
+	 * paths of the slab etc. IMHO PageTail deserves its own flag.
+	 */
+	if (unlikely(PageCompound(page) && PageTail(page)))
+		return page->first_page;
+	return page;
+}
+
 static inline int page_count(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count);
+	return atomic_read(&compound_head(page)->_count);
 }
 
 static inline void get_page(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
@@ -314,6 +325,18 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 	return (compound_page_dtor *)page[1].lru.next;
 }
 
+static inline int compound_order(struct page *page)
+{
+	if (!PageCompound(page) || PageTail(page))
+		return 0;
+	return (unsigned long)page[1].lru.prev;
+}
+
+static inline void set_compound_order(struct page *page, unsigned long order)
+{
+	page[1].lru.prev = (void *)order;
+}
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 96326594e55d..a1e143634946 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -94,6 +94,12 @@
 /* PG_owner_priv_1 users should have descriptive aliases */
 #define PG_checked		PG_owner_priv_1 /* Used by some filesystems */
 
+/*
+ * Marks tail portion of a compound page. We currently do not reclaim
+ * compound pages so we can reuse a flag only used for reclaim here.
+ */
+#define PG_tail			PG_reclaim
+
 #if (BITS_PER_LONG > 32)
 /*
  * 64-bit-only flags build down from bit 31
@@ -241,6 +247,14 @@ static inline void SetPageUptodate(struct page *page)
 #define __SetPageCompound(page)	__set_bit(PG_compound, &(page)->flags)
 #define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
 
+/*
+ * Note: PG_tail is an alias of another page flag. The result of PageTail()
+ * is only valid if PageCompound(page) is true.
+ */
+#define PageTail(page)		test_bit(PG_tail, &(page)->flags)
+#define __SetPageTail(page)	__set_bit(PG_tail, &(page)->flags)
+#define __ClearPageTail(page)	__clear_bit(PG_tail, &(page)->flags)
+
 #ifdef CONFIG_SWAP
 #define PageSwapCache(page)	test_bit(PG_swapcache, &(page)->flags)
 #define SetPageSwapCache(page)	set_bit(PG_swapcache, &(page)->flags)
diff --git a/mm/internal.h b/mm/internal.h
index d527b80b292f..a3110c02aea7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
+	VM_BUG_ON(PageCompound(page) && PageTail(page));
 	VM_BUG_ON(atomic_read(&page->_count));
 	set_page_count(page, 1);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 542fc088ff50..fc241fe295ab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -225,7 +225,7 @@ static void bad_page(struct page *page)
 
 static void free_compound_page(struct page *page)
 {
-	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
+	__free_pages_ok(page, compound_order(page));
 }
 
 static void prep_compound_page(struct page *page, unsigned long order)
@@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order)
 	int nr_pages = 1 << order;
 
 	set_compound_page_dtor(page, free_compound_page);
-	page[1].lru.prev = (void *)order;
-	for (i = 0; i < nr_pages; i++) {
+	set_compound_order(page, order);
+	__SetPageCompound(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
+		__SetPageTail(p);
 		__SetPageCompound(p);
-		set_page_private(p, (unsigned long)page);
+		p->first_page = page;
 	}
 }
 
@@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	int i;
 	int nr_pages = 1 << order;
 
-	if (unlikely((unsigned long)page[1].lru.prev != order))
+	if (unlikely(compound_order(page) != order))
 		bad_page(page);
 
-	for (i = 0; i < nr_pages; i++) {
+	if (unlikely(!PageCompound(page)))
+		bad_page(page);
+	__ClearPageCompound(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageCompound(p) |
-				(page_private(p) != (unsigned long)page)))
+		if (unlikely(!PageCompound(p) | !PageTail(p) |
+				(p->first_page != page)))
 			bad_page(page);
+		__ClearPageTail(p);
 		__ClearPageCompound(p);
 	}
 }
@@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page)
 			1 << PG_private |
 			1 << PG_locked	|
 			1 << PG_active	|
-			1 << PG_reclaim	|
 			1 << PG_slab	|
 			1 << PG_swapcache |
 			1 << PG_writeback |
 			1 << PG_reserved |
 			1 << PG_buddy ))))
 		bad_page(page);
+	/*
+	 * PageReclaim == PageTail. It is only an error
+	 * for PageReclaim to be set if PageCompound is clear.
+	 */
+	if (unlikely(!PageCompound(page) && PageReclaim(page)))
+		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
 	/*
diff --git a/mm/slab.c b/mm/slab.c
index 9cd01fa60004..f4b2e22b5c61 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct kmem_cache *)page->lru.next;
 }
@@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 9d52cce7c999..8fa1c6e937f5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_page(x);
 
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
-
+	page = compound_head(page);
 
 	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
 		set_tracking(s, x, TRACK_FREE);
@@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-	struct page *page = virt_to_page(x);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	struct page *page = compound_head(virt_to_page(x));
 
 	if (!PageSlab(page))
 		return NULL;
@@ -2081,10 +2076,7 @@ void kfree(const void *x)
 	if (!x)
 		return;
 
-	page = virt_to_page(x);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	page = compound_head(virt_to_page(x));
 
 	s = page->slab;
 
@@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = virt_to_page(p);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	page = compound_head(virt_to_page(p));
 
 	new_cache = get_slab(new_size, flags);
 
diff --git a/mm/swap.c b/mm/swap.c
index 2ed7be39795e..218c52a24a21 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page)
 
 static void put_compound_page(struct page *page)
 {
-	page = (struct page *)page_private(page);
+	page = compound_head(page);
 	if (put_page_testzero(page)) {
 		compound_page_dtor *dtor;
 
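
For readers following the patch, here is a minimal userspace sketch of the bookkeeping it converges on: the head page carries PG_compound and stores the order in page[1], while every tail page carries the aliased tail flag plus a first_page pointer back to the head. The struct and fake_* helpers below are simplified stand-ins invented for illustration only; this is not kernel code and does not mirror the real struct page layout.

/*
 * Simplified userspace model -- NOT kernel code. "fake_page" and the
 * fake_* helpers are invented for illustration; they only mimic the
 * head/tail/order bookkeeping introduced by this patch.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_page {
	bool compound;			/* stands in for PG_compound */
	bool tail;			/* stands in for PG_tail (an aliased flag) */
	struct fake_page *first_page;	/* tail -> head back-pointer */
	unsigned long order;		/* kept in page[1] on the head side */
};

/* Resolve any page of a compound allocation to its head page. */
static struct fake_page *fake_compound_head(struct fake_page *page)
{
	if (page->compound && page->tail)
		return page->first_page;
	return page;
}

/* Order is only meaningful on the head; tails and plain pages report 0. */
static unsigned long fake_compound_order(struct fake_page *page)
{
	if (!page->compound || page->tail)
		return 0;
	return page[1].order;
}

/* Rough analogue of prep_compound_page(): mark the head, then each tail. */
static void fake_prep_compound(struct fake_page *pages, unsigned long order)
{
	unsigned long i, nr = 1UL << order;

	pages[0].compound = true;
	pages[1].order = order;
	for (i = 1; i < nr; i++) {
		pages[i].compound = true;
		pages[i].tail = true;
		pages[i].first_page = &pages[0];
	}
}

int main(void)
{
	struct fake_page pages[4];

	memset(pages, 0, sizeof(pages));
	fake_prep_compound(pages, 2);	/* one order-2 "compound page" */

	assert(fake_compound_head(&pages[3]) == &pages[0]);
	assert(fake_compound_order(&pages[0]) == 2);
	assert(fake_compound_order(&pages[3]) == 0);

	printf("tail 3 resolves to head, order %lu\n",
	       fake_compound_order(fake_compound_head(&pages[3])));
	return 0;
}

The real helpers differ in one important way called out in the patch's own comments: PageTail() aliases PG_reclaim and is only meaningful while PageCompound() is set, which is why compound_head() tests both bits before chasing first_page.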