author    Nick Piggin <nickpiggin@yahoo.com.au>    2006-01-08 04:00:42 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>   2006-01-08 23:12:40 -0500
commit    48db57f8ff10eb09ab887ccb6150b0da0c7be24e
tree      5c9fba3937bb802aa2944af189aae57b7bf7aa8b
parent    23316bc86fd31c5d644a71c398ec41d9fecacec4
[PATCH] mm: free_pages opt
Try to streamline free_pages_bulk by ensuring callers don't pass in a
'count' that exceeds the list size. With that guarantee in place,
free_pages_bulk no longer needs to return the number of pages it freed;
callers keep their own counters, and an unexpectedly empty list becomes
a BUG_ON.

Some cleanups:

Rename __free_pages_bulk to __free_one_page.

Put the page list manipulation from __free_pages_ok into free_one_page.

Make __free_pages_ok static.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mm/page_alloc.c | 58 ++++++++++++++++++++++++++++++----------------------------
1 file changed, 30 insertions(+), 28 deletions(-)
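The heart of the patch is a contract change rather than a new algorithm:
free_pages_bulk() used to return how many pages it had actually freed so
callers could reconcile their counters against the return value; now
callers guarantee that 'count' never exceeds the list length, the
function frees exactly that many pages, and an empty list is a bug.
Below is a self-contained userspace model of the new contract; every
name and type in it is a hypothetical stand-in, not kernel code (the
real function walks struct page lru lists under zone->lock):

/* Userspace model of the new free_pages_bulk() contract; all names
 * here are illustrative stand-ins, not the kernel's. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
	struct fake_page *prev, *next;	/* models the page->lru linkage */
	int id;
};

static void fake_list_del(struct fake_page *p)
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
}

/* Previously "static int", returning the pages freed; now it trusts
 * the caller and returns nothing. */
static void free_pages_bulk_model(struct fake_page *head, int count)
{
	while (count--) {
		struct fake_page *page = head->prev;	/* tail first, like list->prev */

		assert(page != head);	/* models BUG_ON(list_empty(list)) */
		fake_list_del(page);
		printf("freed page %d\n", page->id);	/* models __free_one_page() */
		free(page);
	}
}

int main(void)
{
	struct fake_page head = { &head, &head, -1 };
	int count = 0;

	for (int i = 0; i < 4; i++) {	/* build a four-entry list */
		struct fake_page *p = malloc(sizeof(*p));

		p->id = i;
		p->prev = &head;
		p->next = head.next;
		head.next->prev = p;
		head.next = p;
		count++;
	}
	free_pages_bulk_model(&head, count);	/* caller promises count <= list length */
	count -= 4;				/* ...and adjusts its own counter */
	printf("caller's count: %d\n", count);
	return 0;
}

The assert plays the role of the new BUG_ON(list_empty(list)): passing a
count larger than the list is now a caller bug rather than a condition
the function quietly tolerates, which is what lets it drop the return
value.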
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b92a945ae6b..ad3d0202cdef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -308,7 +308,7 @@ static inline int page_is_buddy(struct page *page, int order)
  * -- wli
  */
 
-static inline void __free_pages_bulk (struct page *page,
+static inline void __free_one_page(struct page *page,
 		struct zone *zone, unsigned int order)
 {
 	unsigned long page_idx;
@@ -383,40 +383,42 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static int
-free_pages_bulk(struct zone *zone, int count,
-		struct list_head *list, unsigned int order)
+static void free_pages_bulk(struct zone *zone, int count,
+		struct list_head *list, int order)
 {
-	struct page *page = NULL;
-	int ret = 0;
-
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
-	while (!list_empty(list) && count--) {
+	while (count--) {
+		struct page *page;
+
+		BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_pages_bulk list manipulates */
+		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		__free_pages_bulk(page, zone, order);
-		ret++;
+		__free_one_page(page, zone, order);
 	}
 	spin_unlock(&zone->lock);
-	return ret;
 }
 
-void __free_pages_ok(struct page *page, unsigned int order)
+static void free_one_page(struct zone *zone, struct page *page, int order)
 {
-	unsigned long flags;
 	LIST_HEAD(list);
+	list_add(&page->lru, &list);
+	free_pages_bulk(zone, 1, &list, order);
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
 	int i;
 	int reserved = 0;
 
 	arch_free_page(page, order);
 
 #ifndef CONFIG_MMU
-	if (order > 0)
-		for (i = 1 ; i < (1 << order) ; ++i)
-			__put_page(page + i);
+	for (i = 1 ; i < (1 << order) ; ++i)
+		__put_page(page + i);
 #endif
 
 	for (i = 0 ; i < (1 << order) ; ++i)
@@ -424,11 +426,10 @@ void __free_pages_ok(struct page *page, unsigned int order)
 	if (reserved)
 		return;
 
-	list_add(&page->lru, &list);
-	kernel_map_pages(page, 1<<order, 0);
+	kernel_map_pages(page, 1 << order, 0);
 	local_irq_save(flags);
 	__mod_page_state(pgfree, 1 << order);
-	free_pages_bulk(page_zone(page), 1, &list, order);
+	free_one_page(page_zone(page), page, order);
 	local_irq_restore(flags);
 }
 
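One cleanup in the __free_pages_ok() hunk above is easy to misread as a
behavior change: the old if (order > 0) guard around the !CONFIG_MMU
loop is simply redundant, because the loop bound already encodes it; at
order 0, (1 << order) is 1 and the body never executes. A standalone
demonstration of that identity (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	for (unsigned int order = 0; order < 3; order++) {
		int tail_pages = 0;

		/* same bounds as the kernel loop above */
		for (int i = 1; i < (1 << order); ++i)
			tail_pages++;
		printf("order %u: %d tail pages\n", order, tail_pages);
	}
	return 0;	/* prints 0, 1, 3: order 0 naturally does nothing */
}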
@@ -602,9 +603,8 @@ void drain_remote_pages(void)
 			struct per_cpu_pages *pcp;
 
 			pcp = &pset->pcp[i];
-			if (pcp->count)
-				pcp->count -= free_pages_bulk(zone, pcp->count,
-						&pcp->list, 0);
+			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+			pcp->count = 0;
 		}
 	}
 	local_irq_restore(flags);
@@ -627,8 +627,8 @@ static void __drain_pages(unsigned int cpu)
 
 			pcp = &pset->pcp[i];
 			local_irq_save(flags);
-			pcp->count -= free_pages_bulk(zone, pcp->count,
-						&pcp->list, 0);
+			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+			pcp->count = 0;
 			local_irq_restore(flags);
 		}
 	}
@@ -719,8 +719,10 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	__inc_page_state(pgfree);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
-	if (pcp->count >= pcp->high)
-		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+	if (pcp->count >= pcp->high) {
+		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+		pcp->count -= pcp->batch;
+	}
 	local_irq_restore(flags);
 	put_cpu();
 }
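The free_hot_cold_page() hunk above subtracts pcp->batch unconditionally
rather than a returned count. That is only safe because the branch fires
when pcp->count >= pcp->high, and the pageset setup of this era keeps
the high watermark at a multiple of the batch size, so at least 'batch'
pages are guaranteed to be on the list. A minimal sketch of that
bookkeeping, with a hypothetical struct whose fields mirror struct
per_cpu_pages (the real free_pages_bulk() call is stubbed out as a
comment):

#include <assert.h>
#include <stdio.h>

/* Hypothetical, simplified per-CPU pool; an illustration, not kernel code. */
struct pcp_model {
	int count;	/* pages currently on the pcp list */
	int high;	/* trim threshold */
	int batch;	/* pages handed back to the buddy lists per trim */
};

static void free_hot_page_model(struct pcp_model *pcp)
{
	pcp->count++;			/* a page was just added to the list */
	if (pcp->count >= pcp->high) {
		assert(pcp->count >= pcp->batch);	/* guaranteed by setup */
		/* free_pages_bulk(zone, pcp->batch, &pcp->list, 0); */
		pcp->count -= pcp->batch;	/* the caller owns the counter now */
	}
}

int main(void)
{
	struct pcp_model pcp = { .count = 5, .high = 6, .batch = 2 };

	free_hot_page_model(&pcp);	/* count reaches high, trims one batch */
	printf("count after trim: %d\n", pcp.count);	/* prints 4 */
	return 0;
}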
@@ -759,7 +761,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 
 again:
 	cpu = get_cpu();
-	if (order == 0) {
+	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 
 		pcp = &zone_pcp(zone, cpu)->pcp[cold];
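The final hunk only adds a branch-prediction hint on the order-0 path,
which dominates in practice because single-page allocations are the
common case. In the kernel, likely() expands to GCC's __builtin_expect;
a userspace approximation of the same pattern (function and string
names are illustrative):

#include <stdio.h>

/* Same expansion the kernel uses for its likely() hint. */
#define likely(x)	__builtin_expect(!!(x), 1)

static const char *alloc_path(unsigned int order)
{
	if (likely(order == 0))		/* tell the compiler this branch is hot */
		return "per-cpu list fast path";
	return "buddy allocator slow path";
}

int main(void)
{
	printf("order 0: %s\n", alloc_path(0));
	printf("order 3: %s\n", alloc_path(3));
	return 0;
}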