author		Nick Piggin <nickpiggin@yahoo.com.au>	2006-01-06 03:11:20 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 11:33:29 -0500
commit		a74609fafa2e5cc31d558012abaaa55ec9ad9da4
tree		0be653692864d99da345b575dfe2083994ee1d21
parent		d3cb487149bd706aa6aeb02042332a450978dc1c
[PATCH] mm: page_state opt
Optimise page_state manipulations by introducing interrupt unsafe accessors
to page_state fields.  Callers must provide their own locking (either
disable interrupts or not update from interrupt context).  Switch over the
hot callsites that can easily be moved under interrupts off sections.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
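For illustration only (not part of the patch): a minimal userspace sketch of
the accessor-pair pattern the changelog describes. The names here (counter,
__mod_counter, mod_counter, and the *_stub helpers) are hypothetical
stand-ins for the kernel's per-CPU page_state fields, for
__mod_page_state_offset()/mod_page_state_offset(), and for
local_irq_save()/local_irq_restore(). The double-underscore accessor does
the bare update and relies on the caller for exclusion; the plain accessor
brackets the update itself.

#include <stdio.h>

static unsigned long counter;      /* stand-in for a per-CPU page_state field */
static unsigned long irq_flag;     /* stand-in for the CPU interrupt-enable flag */

/* Hypothetical stubs modelling local_irq_save()/local_irq_restore(). */
static void local_irq_save_stub(unsigned long *flags)
{
	*flags = irq_flag;
	irq_flag = 0;              /* "interrupts off" */
}

static void local_irq_restore_stub(unsigned long flags)
{
	irq_flag = flags;
}

/* Interrupt-unsafe accessor: the caller must provide the exclusion. */
static void __mod_counter(unsigned long delta)
{
	counter += delta;
}

/* Interrupt-safe accessor: brackets the update itself, one flag flip per call. */
static void mod_counter(unsigned long delta)
{
	unsigned long flags;

	local_irq_save_stub(&flags);
	__mod_counter(delta);
	local_irq_restore_stub(flags);
}

int main(void)
{
	unsigned long flags;

	mod_counter(1);                  /* cold path: one irq flip per update */

	/* Hot path: batch several updates under one interrupts-off section. */
	local_irq_save_stub(&flags);
	__mod_counter(1 << 3);           /* e.g. pgfree for an order-3 free */
	__mod_counter(1);
	local_irq_restore_stub(flags);

	printf("counter = %lu\n", counter);
	return 0;
}

This is the payoff visible in the diff below: callsites that already sit
inside local_irq_save()/spin_lock_irqsave() sections call the cheap
double-underscore accessors and avoid a redundant save/restore of the
interrupt flags per counter update.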
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	| 89
1 file changed, 50 insertions(+), 39 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7f580779abdb..fd47494cb989 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -424,9 +424,9 @@ void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	list_add(&page->lru, &list);
-	mod_page_state(pgfree, 1 << order);
 	kernel_map_pages(page, 1<<order, 0);
 	local_irq_save(flags);
+	__mod_page_state(pgfree, 1 << order);
 	free_pages_bulk(page_zone(page), 1, &list, order);
 	local_irq_restore(flags);
 }
@@ -674,18 +674,14 @@ void drain_local_pages(void)
 }
 #endif /* CONFIG_PM */
 
-static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
 {
 #ifdef CONFIG_NUMA
-	unsigned long flags;
-	int cpu;
 	pg_data_t *pg = z->zone_pgdat;
 	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
 	struct per_cpu_pageset *p;
 
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-	p = zone_pcp(z,cpu);
+	p = zone_pcp(z, cpu);
 	if (pg == orig) {
 		p->numa_hit++;
 	} else {
@@ -696,7 +692,6 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
 		p->local_node++;
 	else
 		p->other_node++;
-	local_irq_restore(flags);
 #endif
 }
 
@@ -716,11 +711,11 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	if (free_pages_check(page))
 		return;
 
-	inc_page_state(pgfree);
 	kernel_map_pages(page, 1, 0);
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
+	__inc_page_state(pgfree);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
 	if (pcp->count >= pcp->high)
@@ -753,49 +748,58 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
  * or two.
  */
-static struct page *
-buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
+static struct page *buffered_rmqueue(struct zonelist *zonelist,
+			struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
+	int cpu;
 
 again:
+	cpu = get_cpu();
 	if (order == 0) {
 		struct per_cpu_pages *pcp;
 
-		page = NULL;
-		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+		pcp = &zone_pcp(zone, cpu)->pcp[cold];
 		local_irq_save(flags);
-		if (!pcp->count)
+		if (!pcp->count) {
 			pcp->count += rmqueue_bulk(zone, 0,
 					pcp->batch, &pcp->list);
-		if (likely(pcp->count)) {
-			page = list_entry(pcp->list.next, struct page, lru);
-			list_del(&page->lru);
-			pcp->count--;
+			if (unlikely(!pcp->count))
+				goto failed;
 		}
-		local_irq_restore(flags);
-		put_cpu();
+		page = list_entry(pcp->list.next, struct page, lru);
+		list_del(&page->lru);
+		pcp->count--;
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order);
-		spin_unlock_irqrestore(&zone->lock, flags);
+		spin_unlock(&zone->lock);
+		if (!page)
+			goto failed;
 	}
 
-	if (page != NULL) {
-		BUG_ON(bad_range(zone, page));
-		mod_page_state_zone(zone, pgalloc, 1 << order);
-		if (prep_new_page(page, order))
-			goto again;
+	__mod_page_state_zone(zone, pgalloc, 1 << order);
+	zone_statistics(zonelist, zone, cpu);
+	local_irq_restore(flags);
+	put_cpu();
 
-		if (gfp_flags & __GFP_ZERO)
-			prep_zero_page(page, order, gfp_flags);
+	BUG_ON(bad_range(zone, page));
+	if (prep_new_page(page, order))
+		goto again;
 
-		if (order && (gfp_flags & __GFP_COMP))
-			prep_compound_page(page, order);
-	}
+	if (gfp_flags & __GFP_ZERO)
+		prep_zero_page(page, order, gfp_flags);
 
+	if (order && (gfp_flags & __GFP_COMP))
+		prep_compound_page(page, order);
 	return page;
+
+failed:
+	local_irq_restore(flags);
+	put_cpu();
+	return NULL;
 }
 
 #define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
@@ -871,9 +875,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 			continue;
 		}
 
-		page = buffered_rmqueue(*z, order, gfp_mask);
+		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
 		if (page) {
-			zone_statistics(zonelist, *z);
 			break;
 		}
 	} while (*(++z) != NULL);
@@ -1248,7 +1251,7 @@ void get_full_page_state(struct page_state *ret)
 	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
 }
 
-unsigned long __read_page_state(unsigned long offset)
+unsigned long read_page_state_offset(unsigned long offset)
 {
 	unsigned long ret = 0;
 	int cpu;
@@ -1262,18 +1265,26 @@ unsigned long __read_page_state(unsigned long offset)
 	return ret;
 }
 
-void __mod_page_state(unsigned long offset, unsigned long delta)
+void __mod_page_state_offset(unsigned long offset, unsigned long delta)
+{
+	void *ptr;
+
+	ptr = &__get_cpu_var(page_states);
+	*(unsigned long *)(ptr + offset) += delta;
+}
+EXPORT_SYMBOL(__mod_page_state_offset);
+
+void mod_page_state_offset(unsigned long offset, unsigned long delta)
 {
 	unsigned long flags;
-	void* ptr;
+	void *ptr;
 
 	local_irq_save(flags);
 	ptr = &__get_cpu_var(page_states);
-	*(unsigned long*)(ptr + offset) += delta;
+	*(unsigned long *)(ptr + offset) += delta;
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__mod_page_state);
+EXPORT_SYMBOL(mod_page_state_offset);
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 		unsigned long *free, struct pglist_data *pgdat)