Diffstat (limited to 'mm')
-rw-r--r--	mm/internal.h	2
-rw-r--r--	mm/page_alloc.c	29
-rw-r--r--	mm/slab.c	6
-rw-r--r--	mm/slub.c	19
-rw-r--r--	mm/swap.c	2
5 files changed, 28 insertions, 30 deletions
diff --git a/mm/internal.h b/mm/internal.h
index d527b80b292f..a3110c02aea7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
+	VM_BUG_ON(PageCompound(page) && PageTail(page));
 	VM_BUG_ON(atomic_read(&page->_count));
 	set_page_count(page, 1);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 542fc088ff50..fc241fe295ab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -225,7 +225,7 @@ static void bad_page(struct page *page)
 
 static void free_compound_page(struct page *page)
 {
-	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
+	__free_pages_ok(page, compound_order(page));
 }
 
 static void prep_compound_page(struct page *page, unsigned long order)
@@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order)
 	int nr_pages = 1 << order;
 
 	set_compound_page_dtor(page, free_compound_page);
-	page[1].lru.prev = (void *)order;
-	for (i = 0; i < nr_pages; i++) {
+	set_compound_order(page, order);
+	__SetPageCompound(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
+		__SetPageTail(p);
 		__SetPageCompound(p);
-		set_page_private(p, (unsigned long)page);
+		p->first_page = page;
 	}
 }
 
@@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	int i;
 	int nr_pages = 1 << order;
 
-	if (unlikely((unsigned long)page[1].lru.prev != order))
+	if (unlikely(compound_order(page) != order))
 		bad_page(page);
 
-	for (i = 0; i < nr_pages; i++) {
+	if (unlikely(!PageCompound(page)))
+		bad_page(page);
+	__ClearPageCompound(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageCompound(p) |
-				(page_private(p) != (unsigned long)page)))
+		if (unlikely(!PageCompound(p) | !PageTail(p) |
+				(p->first_page != page)))
 			bad_page(page);
+		__ClearPageTail(p);
 		__ClearPageCompound(p);
 	}
 }
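The three hunks above route all handling of the compound order through set_compound_order() and compound_order(), and they replace the page_private() back-pointer in tail pages with a first_page field; none of those definitions appear in this mm-only diffstat (they presumably live in include/linux/mm.h). Judging from the open-coded lines being removed, the order accessors are expected to be thin wrappers around the same page[1].lru.prev slot, roughly:

	/* Sketch of the assumed helpers; not part of this diff (likely include/linux/mm.h). */
	static inline void set_compound_order(struct page *page, unsigned long order)
	{
		page[1].lru.prev = (void *)order;	/* same slot the removed code wrote directly */
	}

	static inline unsigned long compound_order(struct page *page)
	{
		return (unsigned long)page[1].lru.prev;	/* a real version may first check PageCompound() */
	}

The order itself stays parked in page[1], just behind named accessors now; what actually changes on the mm side is that the head-page back-pointer moves out of page->private and into first_page.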
@@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page)
 			1 << PG_private |
 			1 << PG_locked	|
 			1 << PG_active	|
-			1 << PG_reclaim	|
 			1 << PG_slab	|
 			1 << PG_swapcache |
 			1 << PG_writeback |
 			1 << PG_reserved |
 			1 << PG_buddy ))))
 		bad_page(page);
+	/*
+	 * PageReclaim == PageTail. It is only an error
+	 * for PageReclaim to be set if PageCompound is clear.
+	 */
+	if (unlikely(!PageCompound(page) && PageReclaim(page)))
+		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
 	/*
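The check added to free_pages_check() only works because, as its comment says, PageReclaim and PageTail are the same bit: tail pages carry PG_reclaim (acting as PG_tail) on top of PG_compound, so the bit is only a bug when PG_compound is clear. The flag plumbing itself is outside this mm-only diff (it would sit in include/linux/page-flags.h); a plausible sketch of the aliasing the comment implies:

	/* Assumed aliasing, not shown in this diff: PG_reclaim doubles as PG_tail. */
	#define PG_tail			PG_reclaim
	#define PageTail(page)		test_bit(PG_tail, &(page)->flags)
	#define __SetPageTail(page)	__set_bit(PG_tail, &(page)->flags)
	#define __ClearPageTail(page)	__clear_bit(PG_tail, &(page)->flags)

That is presumably also why 1 << PG_reclaim drops out of the unconditional bad-flags mask above: on a tail page of a compound page the bit is legitimate at this point.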
diff --git a/mm/slab.c b/mm/slab.c
index 9cd01fa60004..f4b2e22b5c61 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct kmem_cache *)page->lru.next;
 }
@@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
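Both slab.c helpers above, and every slub.c and swap.c hunk below, now funnel through compound_head() instead of open-coding "if PageCompound, chase the back-pointer". compound_head() itself is not part of this mm-only diff; given the first_page pointer that prep_compound_page() now installs in tail pages, it is assumed to look roughly like:

	/* Assumed helper, not part of this diff (likely include/linux/mm.h). */
	static inline struct page *compound_head(struct page *page)
	{
		if (unlikely(PageTail(page)))
			return page->first_page;	/* installed by prep_compound_page() */
		return page;
	}

On a head page or an ordinary page it is a no-op, which is what lets the call sites drop their PageCompound() tests without changing behaviour.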
diff --git a/mm/slub.c b/mm/slub.c
index 9d52cce7c999..8fa1c6e937f5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_page(x);
 
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
-
+	page = compound_head(page);
 
 	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
 		set_tracking(s, x, TRACK_FREE);
@@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-	struct page *page = virt_to_page(x);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	struct page *page = compound_head(virt_to_page(x));
 
 	if (!PageSlab(page))
 		return NULL;
@@ -2081,10 +2076,7 @@ void kfree(const void *x)
 	if (!x)
 		return;
 
-	page = virt_to_page(x);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	page = compound_head(virt_to_page(x));
 
 	s = page->slab;
 
@@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = virt_to_page(p);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	page = compound_head(virt_to_page(p));
 
 	new_cache = get_slab(new_size, flags);
 
diff --git a/mm/swap.c b/mm/swap.c
index 2ed7be39795e..218c52a24a21 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page)
 
 static void put_compound_page(struct page *page)
 {
-	page = (struct page *)page_private(page);
+	page = compound_head(page);
 	if (put_page_testzero(page)) {
 		compound_page_dtor *dtor;
 
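put_compound_page() likewise normalises to the head page before dropping the reference. The remainder of the function falls outside the context shown in this hunk; it presumably retrieves the destructor that prep_compound_page() registered via set_compound_page_dtor() and invokes it, along the lines of:

	/* Assumed continuation of put_compound_page(), beyond the context shown above. */
	dtor = get_compound_page_dtor(page);	/* what set_compound_page_dtor() stored */
	(*dtor)(page);

For page allocator compound pages that destructor is the free_compound_page() from the first mm/page_alloc.c hunk, which now frees at compound_order(page).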