diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2016-03-17 17:19:11 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 18:09:34 -0400 |
commit | 3ed3a4f0ddffece942bb2661924d87be4ce63cb7 (patch) | |
tree | 3b47bba0ba26a0301339f4989a57346e0f76b989 | |
parent | 5057dcd0f1aaad57e07e728ba20a99e205c6b9de (diff) |
mm: cleanup *pte_alloc* interfaces
There are a few things about the *pte_alloc*() helpers worth cleaning up:
- 'vma' argument is unused, let's drop it;
- most __pte_alloc() callers do a speculative check for pmd_none()
  before taking ptl: let's introduce a pte_alloc() macro which does
  the check.
The only direct user of __pte_alloc left is userfaultfd, which has a
different expectation about atomicity with respect to the pmd.
- pte_alloc_map() and pte_alloc_map_lock() are redefined using
pte_alloc().
[sudeep.holla@arm.com: fix build for arm64 hugetlbpage]
[sfr@canb.auug.org.au: fix arch/arm/mm/mmu.c some more]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/arm/mm/mmu.c | 6 | ||||
-rw-r--r-- | arch/arm/mm/pgd.c | 2 | ||||
-rw-r--r-- | arch/arm64/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/ia64/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/metag/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/parisc/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/sh/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/sparc/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/tile/mm/hugetlbpage.c | 2 | ||||
-rw-r--r-- | arch/um/kernel/skas/mmu.c | 2 | ||||
-rw-r--r-- | arch/unicore32/mm/pgd.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/tboot.c | 2 | ||||
-rw-r--r-- | include/linux/mm.h | 17 | ||||
-rw-r--r-- | mm/memory.c | 8 | ||||
-rw-r--r-- | mm/mremap.c | 3 | ||||
-rw-r--r-- | mm/userfaultfd.c | 3 |
16 files changed, 27 insertions, 32 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 434d76f0b363..88fbe0d23ca6 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -732,7 +732,7 @@ static void *__init late_alloc(unsigned long sz) | |||
732 | return ptr; | 732 | return ptr; |
733 | } | 733 | } |
734 | 734 | ||
735 | static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr, | 735 | static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr, |
736 | unsigned long prot, | 736 | unsigned long prot, |
737 | void *(*alloc)(unsigned long sz)) | 737 | void *(*alloc)(unsigned long sz)) |
738 | { | 738 | { |
@@ -747,7 +747,7 @@ static pte_t * __init pte_alloc(pmd_t *pmd, unsigned long addr, | |||
747 | static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, | 747 | static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, |
748 | unsigned long prot) | 748 | unsigned long prot) |
749 | { | 749 | { |
750 | return pte_alloc(pmd, addr, prot, early_alloc); | 750 | return arm_pte_alloc(pmd, addr, prot, early_alloc); |
751 | } | 751 | } |
752 | 752 | ||
753 | static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | 753 | static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, |
@@ -756,7 +756,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
756 | void *(*alloc)(unsigned long sz), | 756 | void *(*alloc)(unsigned long sz), |
757 | bool ng) | 757 | bool ng) |
758 | { | 758 | { |
759 | pte_t *pte = pte_alloc(pmd, addr, type->prot_l1, alloc); | 759 | pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); |
760 | do { | 760 | do { |
761 | set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), | 761 | set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), |
762 | ng ? PTE_EXT_NG : 0); | 762 | ng ? PTE_EXT_NG : 0); |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index e683db1b90a3..b8d477321730 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -80,7 +80,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
80 | if (!new_pmd) | 80 | if (!new_pmd) |
81 | goto no_pmd; | 81 | goto no_pmd; |
82 | 82 | ||
83 | new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); | 83 | new_pte = pte_alloc_map(mm, new_pmd, 0); |
84 | if (!new_pte) | 84 | if (!new_pte) |
85 | goto no_pte; | 85 | goto no_pte; |
86 | 86 | ||
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index da30529bb1f6..589fd28e1fb5 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c | |||
@@ -124,7 +124,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
124 | * will be no pte_unmap() to correspond with this | 124 | * will be no pte_unmap() to correspond with this |
125 | * pte_alloc_map(). | 125 | * pte_alloc_map(). |
126 | */ | 126 | */ |
127 | pte = pte_alloc_map(mm, NULL, pmd, addr); | 127 | pte = pte_alloc_map(mm, pmd, addr); |
128 | } else if (sz == PMD_SIZE) { | 128 | } else if (sz == PMD_SIZE) { |
129 | if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && | 129 | if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && |
130 | pud_none(*pud)) | 130 | pud_none(*pud)) |
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index f50d4b3f501a..85de86d36fdf 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c | |||
@@ -38,7 +38,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) | |||
38 | if (pud) { | 38 | if (pud) { |
39 | pmd = pmd_alloc(mm, pud, taddr); | 39 | pmd = pmd_alloc(mm, pud, taddr); |
40 | if (pmd) | 40 | if (pmd) |
41 | pte = pte_alloc_map(mm, NULL, pmd, taddr); | 41 | pte = pte_alloc_map(mm, pmd, taddr); |
42 | } | 42 | } |
43 | return pte; | 43 | return pte; |
44 | } | 44 | } |
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c index 53f0f6c47027..b38700ae4e84 100644 --- a/arch/metag/mm/hugetlbpage.c +++ b/arch/metag/mm/hugetlbpage.c | |||
@@ -67,7 +67,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
67 | pgd = pgd_offset(mm, addr); | 67 | pgd = pgd_offset(mm, addr); |
68 | pud = pud_offset(pgd, addr); | 68 | pud = pud_offset(pgd, addr); |
69 | pmd = pmd_offset(pud, addr); | 69 | pmd = pmd_offset(pud, addr); |
70 | pte = pte_alloc_map(mm, NULL, pmd, addr); | 70 | pte = pte_alloc_map(mm, pmd, addr); |
71 | pgd->pgd &= ~_PAGE_SZ_MASK; | 71 | pgd->pgd &= ~_PAGE_SZ_MASK; |
72 | pgd->pgd |= _PAGE_SZHUGE; | 72 | pgd->pgd |= _PAGE_SZHUGE; |
73 | 73 | ||
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 54ba39262b82..5d6eea925cf4 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c | |||
@@ -63,7 +63,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
63 | if (pud) { | 63 | if (pud) { |
64 | pmd = pmd_alloc(mm, pud, addr); | 64 | pmd = pmd_alloc(mm, pud, addr); |
65 | if (pmd) | 65 | if (pmd) |
66 | pte = pte_alloc_map(mm, NULL, pmd, addr); | 66 | pte = pte_alloc_map(mm, pmd, addr); |
67 | } | 67 | } |
68 | return pte; | 68 | return pte; |
69 | } | 69 | } |
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 6385f60209b6..cc948db74878 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c | |||
@@ -35,7 +35,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
35 | if (pud) { | 35 | if (pud) { |
36 | pmd = pmd_alloc(mm, pud, addr); | 36 | pmd = pmd_alloc(mm, pud, addr); |
37 | if (pmd) | 37 | if (pmd) |
38 | pte = pte_alloc_map(mm, NULL, pmd, addr); | 38 | pte = pte_alloc_map(mm, pmd, addr); |
39 | } | 39 | } |
40 | } | 40 | } |
41 | 41 | ||
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 131eaf4ad7f5..4977800e9770 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c | |||
@@ -146,7 +146,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
146 | if (pud) { | 146 | if (pud) { |
147 | pmd = pmd_alloc(mm, pud, addr); | 147 | pmd = pmd_alloc(mm, pud, addr); |
148 | if (pmd) | 148 | if (pmd) |
149 | pte = pte_alloc_map(mm, NULL, pmd, addr); | 149 | pte = pte_alloc_map(mm, pmd, addr); |
150 | } | 150 | } |
151 | return pte; | 151 | return pte; |
152 | } | 152 | } |
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index c034dc3fe2d4..e212c64682c5 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c | |||
@@ -77,7 +77,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, | |||
77 | else { | 77 | else { |
78 | if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE]) | 78 | if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE]) |
79 | panic("Unexpected page size %#lx\n", sz); | 79 | panic("Unexpected page size %#lx\n", sz); |
80 | return pte_alloc_map(mm, NULL, pmd, addr); | 80 | return pte_alloc_map(mm, pmd, addr); |
81 | } | 81 | } |
82 | } | 82 | } |
83 | #else | 83 | #else |
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 9591a66aa5c5..3943e9d7d13d 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c | |||
@@ -31,7 +31,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, | |||
31 | if (!pmd) | 31 | if (!pmd) |
32 | goto out_pmd; | 32 | goto out_pmd; |
33 | 33 | ||
34 | pte = pte_alloc_map(mm, NULL, pmd, proc); | 34 | pte = pte_alloc_map(mm, pmd, proc); |
35 | if (!pte) | 35 | if (!pte) |
36 | goto out_pte; | 36 | goto out_pte; |
37 | 37 | ||
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c index 2ade20d8eab3..c572a28c76c9 100644 --- a/arch/unicore32/mm/pgd.c +++ b/arch/unicore32/mm/pgd.c | |||
@@ -54,7 +54,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm) | |||
54 | if (!new_pmd) | 54 | if (!new_pmd) |
55 | goto no_pmd; | 55 | goto no_pmd; |
56 | 56 | ||
57 | new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); | 57 | new_pte = pte_alloc_map(mm, new_pmd, 0); |
58 | if (!new_pte) | 58 | if (!new_pte) |
59 | goto no_pte; | 59 | goto no_pte; |
60 | 60 | ||
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 91a4496db434..e72a07f20b05 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -135,7 +135,7 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, | |||
135 | pmd = pmd_alloc(&tboot_mm, pud, vaddr); | 135 | pmd = pmd_alloc(&tboot_mm, pud, vaddr); |
136 | if (!pmd) | 136 | if (!pmd) |
137 | return -1; | 137 | return -1; |
138 | pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr); | 138 | pte = pte_alloc_map(&tboot_mm, pmd, vaddr); |
139 | if (!pte) | 139 | if (!pte) |
140 | return -1; | 140 | return -1; |
141 | set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); | 141 | set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); |
diff --git a/include/linux/mm.h b/include/linux/mm.h index db9df3f78de1..75d1907b9009 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1545,8 +1545,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) | |||
1545 | } | 1545 | } |
1546 | #endif | 1546 | #endif |
1547 | 1547 | ||
1548 | int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, | 1548 | int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); |
1549 | pmd_t *pmd, unsigned long address); | ||
1550 | int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); | 1549 | int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); |
1551 | 1550 | ||
1552 | /* | 1551 | /* |
@@ -1672,15 +1671,15 @@ static inline void pgtable_page_dtor(struct page *page) | |||
1672 | pte_unmap(pte); \ | 1671 | pte_unmap(pte); \ |
1673 | } while (0) | 1672 | } while (0) |
1674 | 1673 | ||
1675 | #define pte_alloc_map(mm, vma, pmd, address) \ | 1674 | #define pte_alloc(mm, pmd, address) \ |
1676 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ | 1675 | (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) |
1677 | pmd, address))? \ | 1676 | |
1678 | NULL: pte_offset_map(pmd, address)) | 1677 | #define pte_alloc_map(mm, pmd, address) \ |
1678 | (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) | ||
1679 | 1679 | ||
1680 | #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ | 1680 | #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ |
1681 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ | 1681 | (pte_alloc(mm, pmd, address) ? \ |
1682 | pmd, address))? \ | 1682 | NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) |
1683 | NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) | ||
1684 | 1683 | ||
1685 | #define pte_alloc_kernel(pmd, address) \ | 1684 | #define pte_alloc_kernel(pmd, address) \ |
1686 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ | 1685 | ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ |
diff --git a/mm/memory.c b/mm/memory.c index 0e247642ed5b..1974fc02c4d0 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -562,8 +562,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
562 | } | 562 | } |
563 | } | 563 | } |
564 | 564 | ||
565 | int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, | 565 | int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) |
566 | pmd_t *pmd, unsigned long address) | ||
567 | { | 566 | { |
568 | spinlock_t *ptl; | 567 | spinlock_t *ptl; |
569 | pgtable_t new = pte_alloc_one(mm, address); | 568 | pgtable_t new = pte_alloc_one(mm, address); |
@@ -3419,12 +3418,11 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3419 | } | 3418 | } |
3420 | 3419 | ||
3421 | /* | 3420 | /* |
3422 | * Use __pte_alloc instead of pte_alloc_map, because we can't | 3421 | * Use pte_alloc() instead of pte_alloc_map, because we can't |
3423 | * run pte_offset_map on the pmd, if an huge pmd could | 3422 | * run pte_offset_map on the pmd, if an huge pmd could |
3424 | * materialize from under us from a different thread. | 3423 | * materialize from under us from a different thread. |
3425 | */ | 3424 | */ |
3426 | if (unlikely(pmd_none(*pmd)) && | 3425 | if (unlikely(pte_alloc(mm, pmd, address))) |
3427 | unlikely(__pte_alloc(mm, vma, pmd, address))) | ||
3428 | return VM_FAULT_OOM; | 3426 | return VM_FAULT_OOM; |
3429 | /* | 3427 | /* |
3430 | * If a huge pmd materialized under us just retry later. Use | 3428 | * If a huge pmd materialized under us just retry later. Use |
diff --git a/mm/mremap.c b/mm/mremap.c index e30c8a6489a6..3fa0a467df66 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -213,8 +213,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
213 | continue; | 213 | continue; |
214 | VM_BUG_ON(pmd_trans_huge(*old_pmd)); | 214 | VM_BUG_ON(pmd_trans_huge(*old_pmd)); |
215 | } | 215 | } |
216 | if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, | 216 | if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr)) |
217 | new_pmd, new_addr)) | ||
218 | break; | 217 | break; |
219 | next = (new_addr + PMD_SIZE) & PMD_MASK; | 218 | next = (new_addr + PMD_SIZE) & PMD_MASK; |
220 | if (extent > next - new_addr) | 219 | if (extent > next - new_addr) |
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 806b0c758c5b..9f3a0290b273 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c | |||
@@ -230,8 +230,7 @@ retry: | |||
230 | break; | 230 | break; |
231 | } | 231 | } |
232 | if (unlikely(pmd_none(dst_pmdval)) && | 232 | if (unlikely(pmd_none(dst_pmdval)) && |
233 | unlikely(__pte_alloc(dst_mm, dst_vma, dst_pmd, | 233 | unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) { |
234 | dst_addr))) { | ||
235 | err = -ENOMEM; | 234 | err = -ENOMEM; |
236 | break; | 235 | break; |
237 | } | 236 | } |