author    Alexander Duyck <alexander.h.duyck@linux.intel.com>  2019-05-13 20:21:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>       2019-05-14 12:47:49 -0400
commit    56ec43d8b02719402c9fcf984feb52ec2300f8a5
tree      83af2c4b65d42fad4442b367e26e229924ce9108
parent    5470dea49f5382257c242ac617d908267727f1a8
mm: drop meminit_pfn_in_nid as it is redundant
As best as I can tell, the meminit_pfn_in_nid call is completely redundant. The deferred memory initialization already makes use of for_each_free_mem_range, which in turn calls into __next_mem_range, which only returns a memory range if it matches the node ID provided, assuming that ID is not NUMA_NO_NODE.

I am operating on the assumption that there are no zones or pg_data_t structures that have a NUMA node of NUMA_NO_NODE associated with them. If that is the case, then __next_mem_range will never return a memory range that doesn't match the zone's node ID, and as such the check is redundant.

One piece I would like to verify is whether this works for ia64. Technically it was using a different approach to get the node ID, but it seems to have the node ID also encoded into the memblock, so I am assuming this is okay but would like confirmation on that.

On my x86_64 test system with 384GB of memory per node, I saw a reduction in initialization time from 2.80s to 1.85s as a result of this patch.

Link: http://lkml.kernel.org/r/20190405221219.12227.93957.stgit@localhost.localdomain
Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Reviewed-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <yi.z.zhang@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
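For reference, the node filtering that this argument relies on is visible in the init loop itself. The following is a simplified sketch, essentially the post-patch loop from deferred_init_memmap() in the diff below with the surrounding setup elided:

	/*
	 * for_each_free_mem_range() is built on __next_mem_range(), which
	 * skips any memblock range whose node id does not match 'nid'
	 * (unless nid == NUMA_NO_NODE). Every pfn handed to
	 * deferred_init_pages() below therefore already belongs to this
	 * node, so a second per-pfn meminit_pfn_in_nid() lookup cannot
	 * reject anything.
	 */
	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
		nr_pages += deferred_init_pages(zone, spfn, epfn);
	}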
 mm/page_alloc.c | 51 ++++++++++++++-------------------------------------
 1 file changed, 14 insertions(+), 37 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 909adce33398..25b82be438d7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1416,36 +1416,22 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 #endif
 
 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
-static inline bool __meminit __maybe_unused
-meminit_pfn_in_nid(unsigned long pfn, int node,
-		   struct mminit_pfnnid_cache *state)
+/* Only safe to use early in boot when initialisation is single-threaded */
+static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 {
 	int nid;
 
-	nid = __early_pfn_to_nid(pfn, state);
+	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
 	if (nid >= 0 && nid != node)
 		return false;
 	return true;
 }
 
-/* Only safe to use early in boot when initialisation is single-threaded */
-static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
-{
-	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
-}
-
 #else
-
 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 {
 	return true;
 }
-static inline bool __meminit __maybe_unused
-meminit_pfn_in_nid(unsigned long pfn, int node,
-		   struct mminit_pfnnid_cache *state)
-{
-	return true;
-}
 #endif
 
 
@@ -1574,21 +1560,13 @@ static inline void __init pgdat_init_report_one_done(void)
  *
  * Then, we check if a current large page is valid by only checking the validity
  * of the head pfn.
- *
- * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
- * within a node: a pfn is between start and end of a node, but does not belong
- * to this memory node.
  */
-static inline bool __init
-deferred_pfn_valid(int nid, unsigned long pfn,
-		   struct mminit_pfnnid_cache *nid_init_state)
+static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
 	if (!pfn_valid_within(pfn))
 		return false;
 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
 		return false;
-	if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
-		return false;
 	return true;
 }
 
@@ -1596,15 +1574,14 @@ deferred_pfn_valid(int nid, unsigned long pfn,
  * Free pages to buddy allocator. Try to free aligned pages in
  * pageblock_nr_pages sizes.
  */
-static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
+static void __init deferred_free_pages(unsigned long pfn,
 				       unsigned long end_pfn)
 {
-	struct mminit_pfnnid_cache nid_init_state = { };
 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
 	unsigned long nr_free = 0;
 
 	for (; pfn < end_pfn; pfn++) {
-		if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+		if (!deferred_pfn_valid(pfn)) {
 			deferred_free_range(pfn - nr_free, nr_free);
 			nr_free = 0;
 		} else if (!(pfn & nr_pgmask)) {
@@ -1624,17 +1601,18 @@ static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
  * by performing it only once every pageblock_nr_pages.
  * Return number of pages initialized.
  */
-static unsigned long __init deferred_init_pages(int nid, int zid,
+static unsigned long __init deferred_init_pages(struct zone *zone,
 						 unsigned long pfn,
 						 unsigned long end_pfn)
 {
-	struct mminit_pfnnid_cache nid_init_state = { };
 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
+	int nid = zone_to_nid(zone);
 	unsigned long nr_pages = 0;
+	int zid = zone_idx(zone);
 	struct page *page = NULL;
 
 	for (; pfn < end_pfn; pfn++) {
-		if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+		if (!deferred_pfn_valid(pfn)) {
 			page = NULL;
 			continue;
 		} else if (!page || !(pfn & nr_pgmask)) {
@@ -1697,12 +1675,12 @@ static int __init deferred_init_memmap(void *data)
 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
 		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
 		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
-		nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
+		nr_pages += deferred_init_pages(zone, spfn, epfn);
 	}
 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
 		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
 		epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
-		deferred_free_pages(nid, zid, spfn, epfn);
+		deferred_free_pages(spfn, epfn);
 	}
 	pgdat_resize_unlock(pgdat, &flags);
 
@@ -1734,7 +1712,6 @@ static int __init deferred_init_memmap(void *data)
 static noinline bool __init
 deferred_grow_zone(struct zone *zone, unsigned int order)
 {
-	int zid = zone_idx(zone);
 	int nid = zone_to_nid(zone);
 	pg_data_t *pgdat = NODE_DATA(nid);
 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
@@ -1784,7 +1761,7 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
 	while (spfn < epfn && nr_pages < nr_pages_needed) {
 		t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
 		first_deferred_pfn = min(t, epfn);
-		nr_pages += deferred_init_pages(nid, zid, spfn,
+		nr_pages += deferred_init_pages(zone, spfn,
 						first_deferred_pfn);
 		spfn = first_deferred_pfn;
 	}
@@ -1796,7 +1773,7 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
 	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
 		spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
 		epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
-		deferred_free_pages(nid, zid, spfn, epfn);
+		deferred_free_pages(spfn, epfn);
 
 		if (first_deferred_pfn == epfn)
 			break;