author     Nick Piggin <nickpiggin@yahoo.com.au>    2006-01-06 03:11:11 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-01-06 11:33:26 -0500
commit     224abf92b2f439a9030f21d2926ec8047d1ffcdb
tree       e45074383aea04ef8b3b8d2cc04a3b34959f9e65 /mm
parent     9328b8faae922e52073785ed6c1eaa8565648a0e
[PATCH] mm: bad_page optimisation
Cut down size slightly by not passing bad_page the function name (it should
be able to be determined by dump_stack()).  And cut down the number of
printks in bad_page.

Also, cut down some branching in the destroy_compound_page path.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
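The "cut down some branching" part relies on a common kernel idiom: independent,
cheap error tests are combined with bitwise '|' rather than logical '||', so there
is no short-circuit evaluation and the compiler can fold the checks into a single
(unlikely) branch to the error path. The sketch below illustrates only that idiom
in plain userspace C; the fake_page struct and helper names are made up for
illustration and are not the kernel code.

#include <stdio.h>

/*
 * Stand-in for struct page; the fields mimic the two checks made per tail
 * page in destroy_compound_page(): PageCompound(p) and page_private(p).
 */
struct fake_page {
        int compound;
        unsigned long private_data;
};

static void report_bad_page(const struct fake_page *p)
{
        fprintf(stderr, "bad page: compound=%d private=%lu\n",
                p->compound, p->private_data);
}

static void check_tail_page(const struct fake_page *p,
                            unsigned long expected_private)
{
        /*
         * Two tests joined with '||' usually compile to two conditional
         * branches because of short-circuit rules.  Joining the int-valued
         * results with bitwise '|' lets both be evaluated unconditionally
         * (they are cheap loads) and branched on once -- the same trick the
         * patch applies in destroy_compound_page().
         */
        if (!p->compound | (p->private_data != expected_private))
                report_bad_page(p);
}

int main(void)
{
        struct fake_page good = { 1, 42 };
        struct fake_page bad  = { 0, 42 };

        check_tail_page(&good, 42);     /* silent */
        check_tail_page(&bad, 42);      /* reports a bad page */
        return 0;
}

The same reasoning already applied to free_pages_check(), which ORs its
mapcount, mapping, count and flag tests together before taking one branch.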
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c   44
1 file changed, 20 insertions, 24 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e12154d9c4ed..b9fd2c238f13 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -132,16 +132,16 @@ static inline int bad_range(struct zone *zone, struct page *page)
 }
 #endif
 
-static void bad_page(const char *function, struct page *page)
+static void bad_page(struct page *page)
 {
-        printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-                function, current->comm, page);
-        printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-                (int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
-                page->mapping, page_mapcount(page), page_count(page));
-        printk(KERN_EMERG "Backtrace:\n");
+        printk(KERN_EMERG "Bad page state in process '%s'\n"
+                "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+                "Trying to fix it up, but a reboot is needed\n"
+                "Backtrace:\n",
+                current->comm, page, (int)(2*sizeof(unsigned long)),
+                (unsigned long)page->flags, page->mapping,
+                page_mapcount(page), page_count(page));
         dump_stack();
-        printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
         page->flags &= ~(1 << PG_lru |
                         1 << PG_private |
                         1 << PG_locked |
@@ -194,19 +194,15 @@ static void destroy_compound_page(struct page *page, unsigned long order)
         int i;
         int nr_pages = 1 << order;
 
-        if (!PageCompound(page))
-                return;
-
-        if (page[1].index != order)
-                bad_page(__FUNCTION__, page);
+        if (unlikely(page[1].index != order))
+                bad_page(page);
 
         for (i = 0; i < nr_pages; i++) {
                 struct page *p = page + i;
 
-                if (!PageCompound(p))
-                        bad_page(__FUNCTION__, page);
-                if (page_private(p) != (unsigned long)page)
-                        bad_page(__FUNCTION__, page);
+                if (unlikely(!PageCompound(p) |
+                                (page_private(p) != (unsigned long)page)))
+                        bad_page(page);
                 ClearPageCompound(p);
         }
 }
@@ -316,7 +312,7 @@ static inline void __free_pages_bulk (struct page *page,
         unsigned long page_idx;
         int order_size = 1 << order;
 
-        if (unlikely(order))
+        if (unlikely(PageCompound(page)))
                 destroy_compound_page(page, order);
 
         page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
@@ -348,7 +344,7 @@ static inline void __free_pages_bulk (struct page *page,
         zone->free_area[order].nr_free++;
 }
 
-static inline int free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(struct page *page)
 {
         if (unlikely(page_mapcount(page) |
                 (page->mapping != NULL) |
@@ -363,7 +359,7 @@ static inline int free_pages_check(const char *function, struct page *page)
                         1 << PG_swapcache |
                         1 << PG_writeback |
                         1 << PG_reserved ))))
-                bad_page(function, page);
+                bad_page(page);
         if (PageDirty(page))
                 __ClearPageDirty(page);
         /*
@@ -422,7 +418,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
 #endif
 
         for (i = 0 ; i < (1 << order) ; ++i)
-                reserved += free_pages_check(__FUNCTION__, page + i);
+                reserved += free_pages_check(page + i);
         if (reserved)
                 return;
 
@@ -517,7 +513,7 @@ static int prep_new_page(struct page *page, int order)
                         1 << PG_swapcache |
                         1 << PG_writeback |
                         1 << PG_reserved ))))
-                bad_page(__FUNCTION__, page);
+                bad_page(page);
 
         /*
          * For now, we report if PG_reserved was found set, but do not
@@ -716,7 +712,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
         if (PageAnon(page))
                 page->mapping = NULL;
-        if (free_pages_check(__FUNCTION__, page))
+        if (free_pages_check(page))
                 return;
 
         inc_page_state(pgfree);