path: root/mm/page_alloc.c
author     Mel Gorman <mel@csn.ul.ie>    2009-06-16 18:32:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 22:47:33 -0400
commit     3dd2826698b6902aafd9441ce28ebb44735fd0d6 (patch)
tree       c9bda4a84a0545d4acf41123072fd018100e63f8 /mm/page_alloc.c
parent     5117f45d11a9ee62d9b086f1312f3f31781ff155 (diff)
page allocator: calculate the migratetype for allocation only once
The GFP mask is converted into a migratetype when deciding which pagelist to
take a page from. However, this happens multiple times per allocation, at
least once per zone traversed. Calculate it once.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
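The patch is a plain hoisting of a loop-invariant computation: the migratetype is derived from the GFP flags once in __alloc_pages_nodemask() and then passed down through the call chain, instead of being recomputed by buffered_rmqueue() for every zone visited. The standalone C sketch below illustrates only that pattern; it is not kernel code, and the toy_flags_to_type(), toy_rmqueue() and toy_alloc() names are hypothetical stand-ins.

/*
 * Standalone sketch of the pattern (assumed, not kernel code): derive a
 * free-list index from the flags once at the entry point and thread it
 * through as a parameter instead of recomputing it in the per-zone loop.
 */
#include <stdio.h>

#define TOY_NR_TYPES 4

/* Stand-in for allocflags_to_migratetype(): map flags to a list index. */
static int toy_flags_to_type(unsigned int flags)
{
        return (int)((flags >> 3) % TOY_NR_TYPES);
}

/* The inner helper receives the precomputed type instead of re-deriving it. */
static int toy_rmqueue(int zone, int order, int migratetype)
{
        /* Pretend a page was found; encode zone/type/order for the demo. */
        return zone * 100 + migratetype * 10 + order;
}

static int toy_alloc(unsigned int flags, int order, int nr_zones)
{
        int zone, page = -1;
        int migratetype = toy_flags_to_type(flags);     /* computed once */

        for (zone = 0; zone < nr_zones; zone++) {
                page = toy_rmqueue(zone, order, migratetype);
                if (page >= 0)
                        break;
        }
        return page;
}

int main(void)
{
        printf("page token: %d\n", toy_alloc(0x18u, 0, 3));
        return 0;
}

In the sketch, toy_flags_to_type() runs once per call to toy_alloc() rather than once per zone iteration, which is the same saving the patch makes for allocflags_to_migratetype().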
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 43
1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8fc6d1f42f0b..d3be076ea9c5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1061,13 +1061,13 @@ void split_page(struct page *page, unsigned int order)
  * or two.
  */
 static struct page *buffered_rmqueue(struct zone *preferred_zone,
-                        struct zone *zone, int order, gfp_t gfp_flags)
+                        struct zone *zone, int order, gfp_t gfp_flags,
+                        int migratetype)
 {
         unsigned long flags;
         struct page *page;
         int cold = !!(gfp_flags & __GFP_COLD);
         int cpu;
-        int migratetype = allocflags_to_migratetype(gfp_flags);

 again:
         cpu = get_cpu();
@@ -1389,7 +1389,7 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
-                struct zone *preferred_zone)
+                struct zone *preferred_zone, int migratetype)
 {
         struct zoneref *z;
         struct page *page = NULL;
@@ -1433,7 +1433,8 @@ zonelist_scan:
                         }
                 }

-                page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
+                page = buffered_rmqueue(preferred_zone, zone, order,
+                                                gfp_mask, migratetype);
                 if (page)
                         break;
 this_zone_full:
@@ -1495,7 +1496,8 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
-        nodemask_t *nodemask, struct zone *preferred_zone)
+        nodemask_t *nodemask, struct zone *preferred_zone,
+        int migratetype)
 {
         struct page *page;

@@ -1513,7 +1515,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
                 order, zonelist, high_zoneidx,
                 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
-                preferred_zone);
+                preferred_zone, migratetype);
         if (page)
                 goto out;

@@ -1534,7 +1536,7 @@ static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-        unsigned long *did_some_progress)
+        int migratetype, unsigned long *did_some_progress)
 {
         struct page *page = NULL;
         struct reclaim_state reclaim_state;
@@ -1567,7 +1569,8 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
         if (likely(*did_some_progress))
                 page = get_page_from_freelist(gfp_mask, nodemask, order,
                                         zonelist, high_zoneidx,
-                                        alloc_flags, preferred_zone);
+                                        alloc_flags, preferred_zone,
+                                        migratetype);
         return page;
 }

@@ -1587,14 +1590,15 @@ is_allocation_high_priority(struct task_struct *p, gfp_t gfp_mask)
 static inline struct page *
 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
-        nodemask_t *nodemask, struct zone *preferred_zone)
+        nodemask_t *nodemask, struct zone *preferred_zone,
+        int migratetype)
 {
         struct page *page;

         do {
                 page = get_page_from_freelist(gfp_mask, nodemask, order,
                         zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
-                        preferred_zone);
+                        preferred_zone, migratetype);

                 if (!page && gfp_mask & __GFP_NOFAIL)
                         congestion_wait(WRITE, HZ/50);
@@ -1617,7 +1621,8 @@ void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
-        nodemask_t *nodemask, struct zone *preferred_zone)
+        nodemask_t *nodemask, struct zone *preferred_zone,
+        int migratetype)
 {
         const gfp_t wait = gfp_mask & __GFP_WAIT;
         struct page *page = NULL;
@@ -1668,7 +1673,8 @@ restart:
          */
         page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                         high_zoneidx, alloc_flags,
-                        preferred_zone);
+                        preferred_zone,
+                        migratetype);
         if (page)
                 goto got_pg;

@@ -1678,7 +1684,8 @@ rebalance:
         /* Do not dip into emergency reserves if specified */
         if (!(gfp_mask & __GFP_NOMEMALLOC)) {
                 page = __alloc_pages_high_priority(gfp_mask, order,
-                        zonelist, high_zoneidx, nodemask, preferred_zone);
+                        zonelist, high_zoneidx, nodemask, preferred_zone,
+                        migratetype);
                 if (page)
                         goto got_pg;
         }
@@ -1696,7 +1703,7 @@ rebalance:
                                         zonelist, high_zoneidx,
                                         nodemask,
                                         alloc_flags, preferred_zone,
-                                        &did_some_progress);
+                                        migratetype, &did_some_progress);
         if (page)
                 goto got_pg;

@@ -1708,7 +1715,8 @@ rebalance:
                 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
                         page = __alloc_pages_may_oom(gfp_mask, order,
                                         zonelist, high_zoneidx,
-                                        nodemask, preferred_zone);
+                                        nodemask, preferred_zone,
+                                        migratetype);
                         if (page)
                                 goto got_pg;

@@ -1755,6 +1763,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
         enum zone_type high_zoneidx = gfp_zone(gfp_mask);
         struct zone *preferred_zone;
         struct page *page;
+        int migratetype = allocflags_to_migratetype(gfp_mask);

         lockdep_trace_alloc(gfp_mask);

@@ -1779,11 +1788,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
         /* First allocation attempt */
         page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                         zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
-                        preferred_zone);
+                        preferred_zone, migratetype);
         if (unlikely(!page))
                 page = __alloc_pages_slowpath(gfp_mask, order,
                                 zonelist, high_zoneidx, nodemask,
-                                preferred_zone);
+                                preferred_zone, migratetype);

         return page;
 }