author    Nick Piggin <nickpiggin@yahoo.com.au>    2005-10-29 21:16:12 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>   2005-10-30 00:40:39 -0400
commit    b5810039a54e5babf428e9a1e89fc1940fabff11
tree      835836cb527ec9bd525f93eb7e016f3dfb8c8ae2  /mm/page_alloc.c
parent    f9c98d0287de42221c624482fd4f8d485c98ab22
[PATCH] core remove PageReserved
Remove PageReserved() calls from core code by tightening VM_RESERVED handling in mm/ to cover PageReserved functionality. PageReserved special casing is removed from get_page and put_page.

All setting and clearing of PageReserved is retained, and it is now flagged in the page_alloc checks to help ensure we don't introduce any refcount based freeing of Reserved pages.

MAP_PRIVATE, PROT_WRITE of VM_RESERVED regions is tentatively being deprecated. We never completely handled it correctly anyway, and it can be reintroduced in future if required (Hugh has a proof of concept).

Once PageReserved() calls are removed from kernel/power/swsusp.c and from all arch/ and driver code, the Set and Clear calls and the PG_reserved bit itself can be trivially removed.

The last real user of PageReserved is swsusp, which uses PageReserved to determine whether a struct page points to valid memory or not. This still needs to be addressed (a generic page_is_ram() should work).

A last caveat: the ZERO_PAGE is now refcounted and managed with rmap (and thus mapcounted and counted towards shared rss). These writes to the struct page could cause excessive cacheline bouncing on big systems. There are a number of ways this could be addressed if it is an issue.

Signed-off-by: Nick Piggin <npiggin@suse.de>

Refcount bug fix for filemap_xip.c

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
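The generic page_is_ram() mentioned above could amount to checking whether a pfn falls inside one of the kernel's "System RAM" ranges. Below is a minimal, hypothetical userspace sketch of that idea, not part of this patch: it parses /proc/iomem instead of walking the in-kernel iomem_resource tree, and it assumes 4K pages (PAGE_SHIFT 12). On modern kernels the addresses in /proc/iomem read as zero without CAP_SYS_ADMIN.

/*
 * Hypothetical sketch of the page_is_ram() idea: a pfn points at
 * valid memory iff it lies inside a "System RAM" range.  An in-kernel
 * version would walk iomem_resource; here we parse /proc/iomem.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12	/* assumption: 4K pages */

static int page_is_ram(unsigned long pfn)
{
	unsigned long long start, end;
	unsigned long long addr = (unsigned long long)pfn << PAGE_SHIFT;
	char line[256];
	int ram = 0;
	FILE *f = fopen("/proc/iomem", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		/* entries look like "00100000-bfedffff : System RAM" */
		if (!strstr(line, "System RAM"))
			continue;
		if (sscanf(line, "%llx-%llx", &start, &end) != 2)
			continue;
		if (addr >= start && addr <= end) {
			ram = 1;
			break;
		}
	}
	fclose(f);
	return ram;
}

int main(void)
{
	unsigned long pfn = 0x100;	/* example pfn */
	printf("pfn %#lx is %sRAM\n", pfn, page_is_ram(pfn) ? "" : "not ");
	return 0;
}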
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	14
1 file changed, 8 insertions, 6 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 60663232fbb2..0541288ebf4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -114,7 +114,8 @@ static void bad_page(const char *function, struct page *page)
 			1 << PG_reclaim	|
 			1 << PG_slab	|
 			1 << PG_swapcache |
-			1 << PG_writeback);
+			1 << PG_writeback |
+			1 << PG_reserved );
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
@@ -244,7 +245,6 @@ static inline int page_is_buddy(struct page *page, int order)
 {
 	if (PagePrivate(page)           &&
 	    (page_order(page) == order) &&
-	    !PageReserved(page)         &&
 	     page_count(page) == 0)
 		return 1;
 	return 0;
@@ -327,7 +327,8 @@ static inline void free_pages_check(const char *function, struct page *page)
 			1 << PG_reclaim	|
 			1 << PG_slab	|
 			1 << PG_swapcache |
-			1 << PG_writeback )))
+			1 << PG_writeback |
+			1 << PG_reserved )))
 		bad_page(function, page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
@@ -455,7 +456,8 @@ static void prep_new_page(struct page *page, int order)
 			1 << PG_reclaim	|
 			1 << PG_slab	|
 			1 << PG_swapcache |
-			1 << PG_writeback )))
+			1 << PG_writeback |
+			1 << PG_reserved )))
 		bad_page(__FUNCTION__, page);

 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
@@ -1016,7 +1018,7 @@ void __pagevec_free(struct pagevec *pvec)

 fastcall void __free_pages(struct page *page, unsigned int order)
 {
-	if (!PageReserved(page) && put_page_testzero(page)) {
+	if (put_page_testzero(page)) {
 		if (order == 0)
 			free_hot_page(page);
 		else
@@ -1674,7 +1676,7 @@ void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			continue;
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
-		set_page_count(page, 0);
+		set_page_count(page, 1);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
 		INIT_LIST_HEAD(&page->lru);
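Note on the memmap_init_zone() hunk: with PageReserved special casing gone from the free path, reserved pages are now initialized with a page count of 1 rather than 0, so the birth reference is never dropped and put_page_testzero() in __free_pages() cannot reach zero for them. The toy model below illustrates that invariant; it is plain C, not kernel code, and struct page and put_page_testzero() here are simplified stand-ins with the atomics elided.

/*
 * Toy model of the refcount invariant: a reserved page born with
 * count 1 survives any balanced get_page()/put_page() pairing,
 * because the base reference is never dropped.
 */
#include <assert.h>
#include <stdio.h>

struct page { int count; };	/* stand-in for the kernel's struct page */

static int put_page_testzero(struct page *page)
{
	return --page->count == 0;	/* models atomic_dec_and_test() */
}

int main(void)
{
	struct page reserved = { .count = 1 };	/* set_page_count(page, 1) at boot */

	reserved.count++;			/* get_page() by some user */
	assert(!put_page_testzero(&reserved));	/* put_page(): back to 1, never freed */

	printf("count = %d, reserved page survives\n", reserved.count);
	return 0;
}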