about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/gup.c        | 97
-rw-r--r--  mm/page_alloc.c | 10
-rw-r--r--  mm/sparse.c     | 17
3 files changed, 79 insertions(+), 45 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index b2b4d4263768..dfcde13f289a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1643,6 +1643,47 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
1643 return 1; 1643 return 1;
1644} 1644}
1645 1645
1646static void gup_pgd_range(unsigned long addr, unsigned long end,
1647 int write, struct page **pages, int *nr)
1648{
1649 unsigned long next;
1650 pgd_t *pgdp;
1651
1652 pgdp = pgd_offset(current->mm, addr);
1653 do {
1654 pgd_t pgd = READ_ONCE(*pgdp);
1655
1656 next = pgd_addr_end(addr, end);
1657 if (pgd_none(pgd))
1658 return;
1659 if (unlikely(pgd_huge(pgd))) {
1660 if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1661 pages, nr))
1662 return;
1663 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1664 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1665 PGDIR_SHIFT, next, write, pages, nr))
1666 return;
1667 } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
1668 return;
1669 } while (pgdp++, addr = next, addr != end);
1670}
1671
1672#ifndef gup_fast_permitted
1673/*
1674 * Check if it's allowed to use __get_user_pages_fast() for the range, or
1675 * we need to fall back to the slow version:
1676 */
1677bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
1678{
1679 unsigned long len, end;
1680
1681 len = (unsigned long) nr_pages << PAGE_SHIFT;
1682 end = start + len;
1683 return end >= start;
1684}
1685#endif
1686
1646/* 1687/*
1647 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to 1688 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1648 * the regular GUP. It will only return non-negative values. 1689 * the regular GUP. It will only return non-negative values.
@@ -1650,10 +1691,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
1650int __get_user_pages_fast(unsigned long start, int nr_pages, int write, 1691int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1651 struct page **pages) 1692 struct page **pages)
1652{ 1693{
1653 struct mm_struct *mm = current->mm;
1654 unsigned long addr, len, end; 1694 unsigned long addr, len, end;
1655 unsigned long next, flags; 1695 unsigned long flags;
1656 pgd_t *pgdp;
1657 int nr = 0; 1696 int nr = 0;
1658 1697
1659 start &= PAGE_MASK; 1698 start &= PAGE_MASK;
@@ -1677,45 +1716,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1677 * block IPIs that come from THPs splitting. 1716 * block IPIs that come from THPs splitting.
1678 */ 1717 */
1679 1718
1680 local_irq_save(flags); 1719 if (gup_fast_permitted(start, nr_pages, write)) {
1681 pgdp = pgd_offset(mm, addr); 1720 local_irq_save(flags);
1682 do { 1721 gup_pgd_range(addr, end, write, pages, &nr);
1683 pgd_t pgd = READ_ONCE(*pgdp); 1722 local_irq_restore(flags);
1684 1723 }
1685 next = pgd_addr_end(addr, end);
1686 if (pgd_none(pgd))
1687 break;
1688 if (unlikely(pgd_huge(pgd))) {
1689 if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1690 pages, &nr))
1691 break;
1692 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1693 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1694 PGDIR_SHIFT, next, write, pages, &nr))
1695 break;
1696 } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
1697 break;
1698 } while (pgdp++, addr = next, addr != end);
1699 local_irq_restore(flags);
1700 1724
1701 return nr; 1725 return nr;
1702} 1726}
1703 1727
1704#ifndef gup_fast_permitted
1705/*
1706 * Check if it's allowed to use __get_user_pages_fast() for the range, or
1707 * we need to fall back to the slow version:
1708 */
1709bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
1710{
1711 unsigned long len, end;
1712
1713 len = (unsigned long) nr_pages << PAGE_SHIFT;
1714 end = start + len;
1715 return end >= start;
1716}
1717#endif
1718
1719/** 1728/**
1720 * get_user_pages_fast() - pin user pages in memory 1729 * get_user_pages_fast() - pin user pages in memory
1721 * @start: starting user address 1730 * @start: starting user address
@@ -1735,12 +1744,22 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
1735int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1744int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1736 struct page **pages) 1745 struct page **pages)
1737{ 1746{
1747 unsigned long addr, len, end;
1738 int nr = 0, ret = 0; 1748 int nr = 0, ret = 0;
1739 1749
1740 start &= PAGE_MASK; 1750 start &= PAGE_MASK;
1751 addr = start;
1752 len = (unsigned long) nr_pages << PAGE_SHIFT;
1753 end = start + len;
1754
1755 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1756 (void __user *)start, len)))
1757 return 0;
1741 1758
1742 if (gup_fast_permitted(start, nr_pages, write)) { 1759 if (gup_fast_permitted(start, nr_pages, write)) {
1743 nr = __get_user_pages_fast(start, nr_pages, write, pages); 1760 local_irq_disable();
1761 gup_pgd_range(addr, end, write, pages, &nr);
1762 local_irq_enable();
1744 ret = nr; 1763 ret = nr;
1745 } 1764 }
1746 1765
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 77e4d3c5c57b..8dfd13f724d9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5646,6 +5646,16 @@ void __init sparse_memory_present_with_active_regions(int nid)
5646 unsigned long start_pfn, end_pfn; 5646 unsigned long start_pfn, end_pfn;
5647 int i, this_nid; 5647 int i, this_nid;
5648 5648
5649#ifdef CONFIG_SPARSEMEM_EXTREME
5650 if (!mem_section) {
5651 unsigned long size, align;
5652
5653 size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
5654 align = 1 << (INTERNODE_CACHE_SHIFT);
5655 mem_section = memblock_virt_alloc(size, align);
5656 }
5657#endif
5658
5649 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 5659 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5650 memory_present(this_nid, start_pfn, end_pfn); 5660 memory_present(this_nid, start_pfn, end_pfn);
5651} 5661}
diff --git a/mm/sparse.c b/mm/sparse.c
index 83b3bf6461af..b00a97398795 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -22,8 +22,7 @@
22 * 1) mem_section - memory sections, mem_map's for valid memory 22 * 1) mem_section - memory sections, mem_map's for valid memory
23 */ 23 */
24#ifdef CONFIG_SPARSEMEM_EXTREME 24#ifdef CONFIG_SPARSEMEM_EXTREME
25struct mem_section *mem_section[NR_SECTION_ROOTS] 25struct mem_section **mem_section;
26 ____cacheline_internodealigned_in_smp;
27#else 26#else
28struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] 27struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
29 ____cacheline_internodealigned_in_smp; 28 ____cacheline_internodealigned_in_smp;
@@ -100,7 +99,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
100int __section_nr(struct mem_section* ms) 99int __section_nr(struct mem_section* ms)
101{ 100{
102 unsigned long root_nr; 101 unsigned long root_nr;
103 struct mem_section* root; 102 struct mem_section *root = NULL;
104 103
105 for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) { 104 for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
106 root = __nr_to_section(root_nr * SECTIONS_PER_ROOT); 105 root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
@@ -111,7 +110,7 @@ int __section_nr(struct mem_section* ms)
111 break; 110 break;
112 } 111 }
113 112
114 VM_BUG_ON(root_nr == NR_SECTION_ROOTS); 113 VM_BUG_ON(!root);
115 114
116 return (root_nr * SECTIONS_PER_ROOT) + (ms - root); 115 return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
117} 116}
@@ -329,11 +328,17 @@ again:
329static void __init check_usemap_section_nr(int nid, unsigned long *usemap) 328static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
330{ 329{
331 unsigned long usemap_snr, pgdat_snr; 330 unsigned long usemap_snr, pgdat_snr;
332 static unsigned long old_usemap_snr = NR_MEM_SECTIONS; 331 static unsigned long old_usemap_snr;
333 static unsigned long old_pgdat_snr = NR_MEM_SECTIONS; 332 static unsigned long old_pgdat_snr;
334 struct pglist_data *pgdat = NODE_DATA(nid); 333 struct pglist_data *pgdat = NODE_DATA(nid);
335 int usemap_nid; 334 int usemap_nid;
336 335
336 /* First call */
337 if (!old_usemap_snr) {
338 old_usemap_snr = NR_MEM_SECTIONS;
339 old_pgdat_snr = NR_MEM_SECTIONS;
340 }
341
337 usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT); 342 usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
338 pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT); 343 pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
339 if (usemap_snr == pgdat_snr) 344 if (usemap_snr == pgdat_snr)