author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:22 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:40 -0400
commit	1bb3630e89cb8a7b3d3807629c20c5bad88290ff (patch)
tree	3d1fd73487ca66f227701b9530f2c76fcc6f9da4
parent	872fec16d9a0ed3b75b8893aa217e49cca575ee5 (diff)
[PATCH] mm: ptd_alloc inline and out
It seems odd to me that, whereas pud_alloc and pmd_alloc test inline, only calling the out-of-line __pud_alloc or __pmd_alloc if allocation is needed, pte_alloc_map and pte_alloc_kernel are entirely out-of-line. Though it does add a little to kernel size, change them to macros testing inline, calling __pte_alloc or __pte_alloc_kernel to allocate out-of-line. Mark none of them as fastcall; leave that to CONFIG_REGPARM or not.

It also seems more natural for the out-of-line functions to leave the offset calculation and map to the inline, which has to do it anyway for the common case. At least mremap move wants __pte_alloc without _map.

Macros rather than inline functions, certainly to avoid the header file issues which arise from CONFIG_HIGHPTE needing kmap_types.h, but also in case any architectures I haven't built would have other such problems.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
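The shape of the new macros is easiest to see outside the kernel. Below is a minimal user-space sketch, in plain C, of the inline-test/out-of-line-allocate pattern the patch adopts; all names here (dir_t, __dir_alloc, dir_alloc, dir_offset) are illustrative stand-ins, not from the patch, and the kernel's locking and unlikely() annotation are omitted.

#include <stdio.h>
#include <stdlib.h>

typedef struct { void *table; } dir_t;

/* Out-of-line slow path: allocate and install the missing table,
 * returning 0 on success or nonzero on failure (-ENOMEM in-kernel). */
static int __dir_alloc(dir_t *d)
{
	void *new = calloc(1, 4096);
	if (!new)
		return -1;
	d->table = new;
	return 0;
}

#define dir_offset(d, i)	((char *)(d)->table + (i))

/* Inline fast path, same shape as the new pte_alloc_map(): the
 * short-circuit && runs __dir_alloc() only when the table is missing,
 * and a nonzero (failed) return yields NULL rather than the offset. */
#define dir_alloc(d, i) \
	((!(d)->table && __dir_alloc(d)) ? NULL : dir_offset(d, i))

int main(void)
{
	dir_t d = { NULL };
	char *p = dir_alloc(&d, 128);	/* table absent: allocates, then offsets */
	char *q = dir_alloc(&d, 256);	/* table present: offsets only */
	printf("%d %d\n", p != NULL, q != NULL);
	free(d.table);
	return 0;
}

Because __dir_alloc returns an int status rather than a pointer, the offset arithmetic stays in the inline macro for the common case, which is exactly the asymmetry the patch removes from pte_alloc_map and pte_alloc_kernel.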
-rw-r--r--	include/asm-generic/4level-fixup.h	11
-rw-r--r--	include/linux/mm.h	38
-rw-r--r--	mm/memory.c	95
-rw-r--r--	mm/mremap.c	7
4 files changed, 62 insertions(+), 89 deletions(-)
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index c20ec257ecc0..68c6fea994d9 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -10,14 +10,9 @@
 
 #define pud_t pgd_t
 
 #define pmd_alloc(mm, pud, address) \
-({	pmd_t *ret; \
-	if (pgd_none(*pud)) \
-		ret = __pmd_alloc(mm, pud, address); \
-	else \
-		ret = pmd_offset(pud, address); \
-	ret; \
-})
+	((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+		NULL: pmd_offset(pud, address))
 
 #define pud_alloc(mm, pgd, address) (pgd)
 #define pud_offset(pgd, start) (pgd)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b9fa82b96d9e..22c2d6922c0e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -704,10 +704,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
-extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(pmd_t *pmd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
 extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
@@ -760,32 +756,36 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
 
-/*
- * On a two-level or three-level page table, this ends up being trivial. Thus
- * the inlining and the symmetry break with pte_alloc_map() that does all
- * of this out-of-line.
- */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+
 /*
  * The following ifdef needed to get the 4level-fixup.h header to work.
  * Remove it when 4level-fixup.h has been removed.
  */
-#ifdef CONFIG_MMU
-#ifndef __ARCH_HAS_4LEVEL_HACK
+#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
-	if (pgd_none(*pgd))
-		return __pud_alloc(mm, pgd, address);
-	return pud_offset(pgd, address);
+	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
+		NULL: pud_offset(pgd, address);
 }
 
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
-	if (pud_none(*pud))
-		return __pmd_alloc(mm, pud, address);
-	return pmd_offset(pud, address);
+	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+		NULL: pmd_offset(pud, address);
 }
-#endif
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+#define pte_alloc_map(mm, pmd, address) \
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+		NULL: pte_offset_map(pmd, address))
+
+#define pte_alloc_kernel(pmd, address) \
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+		NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, pg_data_t *pgdat,
diff --git a/mm/memory.c b/mm/memory.c
index 95a4553c75f7..4bdd1186b43b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -280,50 +280,39 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 	}
 }
 
-pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-	if (!pmd_present(*pmd)) {
-		struct page *new;
+	struct page *new;
 
-		spin_unlock(&mm->page_table_lock);
-		new = pte_alloc_one(mm, address);
-		spin_lock(&mm->page_table_lock);
-		if (!new)
-			return NULL;
-		/*
-		 * Because we dropped the lock, we should re-check the
-		 * entry, as somebody else could have populated it..
-		 */
-		if (pmd_present(*pmd)) {
-			pte_free(new);
-			goto out;
-		}
+	spin_unlock(&mm->page_table_lock);
+	new = pte_alloc_one(mm, address);
+	spin_lock(&mm->page_table_lock);
+	if (!new)
+		return -ENOMEM;
+
+	if (pmd_present(*pmd))		/* Another has populated it */
+		pte_free(new);
+	else {
 		mm->nr_ptes++;
 		inc_page_state(nr_page_table_pages);
 		pmd_populate(mm, pmd, new);
 	}
-out:
-	return pte_offset_map(pmd, address);
+	return 0;
 }
 
-pte_t fastcall * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
-	if (!pmd_present(*pmd)) {
-		pte_t *new;
-
-		new = pte_alloc_one_kernel(&init_mm, address);
-		if (!new)
-			return NULL;
-
-		spin_lock(&init_mm.page_table_lock);
-		if (pmd_present(*pmd))
-			pte_free_kernel(new);
-		else
-			pmd_populate_kernel(&init_mm, pmd, new);
-		spin_unlock(&init_mm.page_table_lock);
-	}
-	return pte_offset_kernel(pmd, address);
+	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	spin_lock(&init_mm.page_table_lock);
+	if (pmd_present(*pmd))		/* Another has populated it */
+		pte_free_kernel(new);
+	else
+		pmd_populate_kernel(&init_mm, pmd, new);
+	spin_unlock(&init_mm.page_table_lock);
+	return 0;
 }
 
 static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
@@ -2093,7 +2082,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * Allocate page upper directory.
  * We've already handled the fast-path in-line.
  */
-pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
 	pud_t *new;
 
@@ -2103,19 +2092,17 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
 	if (!new) {
 		if (mm != &init_mm)	/* Temporary bridging hack */
 			spin_lock(&mm->page_table_lock);
-		return NULL;
+		return -ENOMEM;
 	}
 
 	spin_lock(&mm->page_table_lock);
-	if (pgd_present(*pgd)) {
+	if (pgd_present(*pgd))		/* Another has populated it */
 		pud_free(new);
-		goto out;
-	}
-	pgd_populate(mm, pgd, new);
- out:
+	else
+		pgd_populate(mm, pgd, new);
 	if (mm == &init_mm)		/* Temporary bridging hack */
 		spin_unlock(&mm->page_table_lock);
-	return pud_offset(pgd, address);
+	return 0;
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
 
@@ -2124,7 +2111,7 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
  * Allocate page middle directory.
  * We've already handled the fast-path in-line.
  */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
 	pmd_t *new;
 
@@ -2134,28 +2121,24 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
 	if (!new) {
 		if (mm != &init_mm)	/* Temporary bridging hack */
 			spin_lock(&mm->page_table_lock);
-		return NULL;
+		return -ENOMEM;
 	}
 
 	spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
-	if (pud_present(*pud)) {
+	if (pud_present(*pud))		/* Another has populated it */
 		pmd_free(new);
-		goto out;
-	}
-	pud_populate(mm, pud, new);
+	else
+		pud_populate(mm, pud, new);
 #else
-	if (pgd_present(*pud)) {
+	if (pgd_present(*pud))		/* Another has populated it */
 		pmd_free(new);
-		goto out;
-	}
-	pgd_populate(mm, pud, new);
+	else
+		pgd_populate(mm, pud, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
-
- out:
 	if (mm == &init_mm)		/* Temporary bridging hack */
 		spin_unlock(&mm->page_table_lock);
-	return pmd_offset(pud, address);
+	return 0;
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
diff --git a/mm/mremap.c b/mm/mremap.c
index ccf456477020..616facc3d28a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -51,7 +51,6 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd = NULL;
-	pte_t *pte;
 
 	/*
 	 * We do need page_table_lock: because allocators expect that.
@@ -66,12 +65,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
 	if (!pmd)
 		goto out;
 
-	pte = pte_alloc_map(mm, pmd, addr);
-	if (!pte) {
+	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
 		pmd = NULL;
-		goto out;
-	}
-	pte_unmap(pte);
 out:
 	spin_unlock(&mm->page_table_lock);
 	return pmd;
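One subtlety the new __pte_alloc keeps from the old pte_alloc_map: mm->page_table_lock is dropped around pte_alloc_one(), which may sleep, so the pmd must be re-tested after relocking, and a losing racer frees its fresh page rather than overwriting the winner's. Below is a rough user-space analogue of that recheck idiom, assuming only POSIX threads; table_lock, populate_slot, and worker are illustrative names, not from the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;			/* a pmd entry, in miniature */

/* Called with table_lock held; returns with it still held,
 * like __pte_alloc.  The lock is dropped around the allocation. */
static int populate_slot(void)
{
	void *new;

	pthread_mutex_unlock(&table_lock);	/* allocation may sleep */
	new = calloc(1, 4096);
	pthread_mutex_lock(&table_lock);
	if (!new)
		return -1;			/* stands in for -ENOMEM */

	if (slot)		/* Another has populated it meanwhile */
		free(new);
	else
		slot = new;
	return 0;
}

static void *worker(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&table_lock);
	if (!slot)				/* the inline test */
		populate_slot();		/* the out-of-line path */
	pthread_mutex_unlock(&table_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("slot %s\n", slot ? "populated" : "empty");
	free(slot);
	return 0;
}

Build with -pthread. Run it enough times and both threads will occasionally race into populate_slot's allocation; exactly one page is installed and the other is freed, which is the behaviour the "Another has populated it" branches above preserve.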