path: root/mm/page_alloc.c
author      Nick Piggin <nickpiggin@yahoo.com.au>    2006-01-06 03:10:56 -0500
committer   Linus Torvalds <torvalds@g5.osdl.org>    2006-01-06 11:33:25 -0500
commit      c54ad30c784b84d0275152d0ca80985b21471811 (patch)
tree        7a40d6ddbe67360a1d9c577e3a2987d140056303 /mm/page_alloc.c
parent      c484d41042e6ccb88089ca41e3b3eed1bafdae21 (diff)
[PATCH] mm: pagealloc opt
Slightly optimise some page allocation and freeing functions by taking
advantage of knowing whether or not interrupts are disabled.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--    mm/page_alloc.c    18
1 file changed, 11 insertions(+), 7 deletions(-)
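
The change itself is a small pattern worth spelling out: free_pages_bulk() and rmqueue_bulk() are only ever reached with local interrupts already disabled, so their internal spin_lock_irqsave()/spin_unlock_irqrestore() pairs can become plain spin_lock()/spin_unlock(), with the IRQ save/restore hoisted out to the callers (here __free_pages_ok() and __drain_pages()) that are not already in such a context. A minimal sketch of the before/after shape, using hypothetical names (demo_lock, demo_bulk_op, demo_caller) rather than the kernel's actual functions:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

/* Before: every call pays for an interrupt save/restore. */
static void demo_bulk_op_old(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... walk and modify the shared list ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/* After: rely on the caller's context; must run with IRQs disabled. */
static void demo_bulk_op(void)
{
        spin_lock(&demo_lock);
        /* ... walk and modify the shared list ... */
        spin_unlock(&demo_lock);
}

/* A caller that is not already IRQ-disabled does the toggle itself, once. */
static void demo_caller(void)
{
        unsigned long flags;

        local_irq_save(flags);
        demo_bulk_op();
        local_irq_restore(flags);
}

The fast path is where this pays off: buffered_rmqueue() already manipulates the per-CPU list under local_irq_save(), so rmqueue_bulk() no longer performs a second, redundant save/restore on every refill.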
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07825c637a58..680cbe5b6ba2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -375,11 +375,10 @@ static int
 free_pages_bulk(struct zone *zone, int count,
                struct list_head *list, unsigned int order)
 {
-       unsigned long flags;
        struct page *page = NULL;
        int ret = 0;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
        while (!list_empty(list) && count--) {
@@ -389,12 +388,13 @@ free_pages_bulk(struct zone *zone, int count,
                __free_pages_bulk(page, zone, order);
                ret++;
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
        return ret;
 }
 
 void __free_pages_ok(struct page *page, unsigned int order)
 {
+       unsigned long flags;
        LIST_HEAD(list);
        int i;
        int reserved = 0;
@@ -415,7 +415,9 @@ void __free_pages_ok(struct page *page, unsigned int order)
        list_add(&page->lru, &list);
        mod_page_state(pgfree, 1 << order);
        kernel_map_pages(page, 1<<order, 0);
+       local_irq_save(flags);
        free_pages_bulk(page_zone(page), 1, &list, order);
+       local_irq_restore(flags);
 }
 
 
@@ -539,12 +541,11 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list)
 {
-       unsigned long flags;
        int i;
        int allocated = 0;
        struct page *page;
 
-       spin_lock_irqsave(&zone->lock, flags);
+       spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                page = __rmqueue(zone, order);
                if (page == NULL)
@@ -552,7 +553,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                allocated++;
                list_add_tail(&page->lru, list);
        }
-       spin_unlock_irqrestore(&zone->lock, flags);
+       spin_unlock(&zone->lock);
        return allocated;
 }
 
@@ -589,6 +590,7 @@ void drain_remote_pages(void)
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
+       unsigned long flags;
        struct zone *zone;
        int i;
 
@@ -600,8 +602,10 @@ static void __drain_pages(unsigned int cpu)
                        struct per_cpu_pages *pcp;
 
                        pcp = &pset->pcp[i];
+                       local_irq_save(flags);
                        pcp->count -= free_pages_bulk(zone, pcp->count,
                                                &pcp->list, 0);
+                       local_irq_restore(flags);
                }
        }
 }
@@ -744,7 +748,7 @@ again:
                if (pcp->count <= pcp->low)
                        pcp->count += rmqueue_bulk(zone, 0,
                                                pcp->batch, &pcp->list);
-               if (pcp->count) {
+               if (likely(pcp->count)) {
                        page = list_entry(pcp->list.next, struct page, lru);
                        list_del(&page->lru);
                        pcp->count--;
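
The last hunk is a separate micro-optimisation: once rmqueue_bulk() has refilled the per-CPU list, pcp->count is almost always non-zero, and likely() tells the compiler to lay that branch out as the fall-through path. The kernel's likely()/unlikely() macros wrap GCC's __builtin_expect(); a small self-contained sketch, with take_one() as a hypothetical stand-in for the per-CPU fast path:

#include <stdio.h>

/* Same idea as include/linux/compiler.h: !! normalises the value to 0/1. */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

/* Hypothetical stand-in for the pcp fast path: count is almost never zero. */
static int take_one(int count)
{
        if (likely(count))
                return count - 1;       /* hot path, laid out as fall-through */
        return -1;                      /* cold path: refill failed */
}

int main(void)
{
        printf("%d\n", take_one(4));    /* prints 3 */
        return 0;
}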