author     Hugh Dickins <hugh@veritas.com>          2005-10-29 21:16:22 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-10-30 00:40:40 -0400
commit     1bb3630e89cb8a7b3d3807629c20c5bad88290ff (patch)
tree       3d1fd73487ca66f227701b9530f2c76fcc6f9da4 /mm
parent     872fec16d9a0ed3b75b8893aa217e49cca575ee5 (diff)
[PATCH] mm: ptd_alloc inline and out
It seems odd to me that, whereas pud_alloc and pmd_alloc test inline and only call the out-of-line __pud_alloc or __pmd_alloc when allocation is needed, pte_alloc_map and pte_alloc_kernel are entirely out-of-line. Though it does add a little to kernel size, change them to macros that test inline and call __pte_alloc or __pte_alloc_kernel to allocate out-of-line. Mark none of them as fastcall; leave that to CONFIG_REGPARM or not.

It also seems more natural for the out-of-line functions to leave the offset calculation and map to the inline part, which has to do it anyway for the common case. At least the mremap move path wants __pte_alloc without _map.

These are macros rather than inline functions, certainly to avoid the header file issues which arise from CONFIG_HIGHPTE needing kmap_types.h, but also in case any architectures I haven't built would have other such problems.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
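The macro side of this change lands in the headers, which fall outside this mm/-limited diffstat. As a sketch of the shape described above (the unlikely() annotation and exact spelling are assumptions, not quoted from the actual include/linux/mm.h hunk), the inline test would look roughly like:

#define pte_alloc_map(mm, pmd, address)                                 \
        ((unlikely(!pmd_present(*(pmd))) &&                             \
          __pte_alloc(mm, pmd, address)) ?                              \
                NULL : pte_offset_map(pmd, address))

#define pte_alloc_kernel(pmd, address)                                  \
        ((unlikely(!pmd_present(*(pmd))) &&                             \
          __pte_alloc_kernel(pmd, address)) ?                           \
                NULL : pte_offset_kernel(pmd, address))

The common case pays one pmd_present() test plus the offset/map it needed anyway; only a missing page table takes the out-of-line call, and its -ENOMEM surfaces to callers as the familiar NULL.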
Diffstat (limited to 'mm')
-rw-r--r--   mm/memory.c   95
-rw-r--r--   mm/mremap.c    7
2 files changed, 40 insertions, 62 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 95a4553c75f7..4bdd1186b43b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -280,50 +280,39 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
         }
 }
 
-pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
-                        unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-        if (!pmd_present(*pmd)) {
-                struct page *new;
+        struct page *new;
 
-                spin_unlock(&mm->page_table_lock);
-                new = pte_alloc_one(mm, address);
-                spin_lock(&mm->page_table_lock);
-                if (!new)
-                        return NULL;
-                /*
-                 * Because we dropped the lock, we should re-check the
-                 * entry, as somebody else could have populated it..
-                 */
-                if (pmd_present(*pmd)) {
-                        pte_free(new);
-                        goto out;
-                }
+        spin_unlock(&mm->page_table_lock);
+        new = pte_alloc_one(mm, address);
+        spin_lock(&mm->page_table_lock);
+        if (!new)
+                return -ENOMEM;
+
+        if (pmd_present(*pmd))          /* Another has populated it */
+                pte_free(new);
+        else {
                 mm->nr_ptes++;
                 inc_page_state(nr_page_table_pages);
                 pmd_populate(mm, pmd, new);
         }
-out:
-        return pte_offset_map(pmd, address);
+        return 0;
 }
 
-pte_t fastcall * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
-        if (!pmd_present(*pmd)) {
-                pte_t *new;
-
-                new = pte_alloc_one_kernel(&init_mm, address);
-                if (!new)
-                        return NULL;
+        pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+        if (!new)
+                return -ENOMEM;
 
-                spin_lock(&init_mm.page_table_lock);
-                if (pmd_present(*pmd))
-                        pte_free_kernel(new);
-                else
-                        pmd_populate_kernel(&init_mm, pmd, new);
-                spin_unlock(&init_mm.page_table_lock);
-        }
-        return pte_offset_kernel(pmd, address);
+        spin_lock(&init_mm.page_table_lock);
+        if (pmd_present(*pmd))          /* Another has populated it */
+                pte_free_kernel(new);
+        else
+                pmd_populate_kernel(&init_mm, pmd, new);
+        spin_unlock(&init_mm.page_table_lock);
+        return 0;
 }
 
 static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
@@ -2093,7 +2082,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * Allocate page upper directory.
  * We've already handled the fast-path in-line.
  */
-pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
         pud_t *new;
 
@@ -2103,19 +2092,17 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
         if (!new) {
                 if (mm != &init_mm)     /* Temporary bridging hack */
                         spin_lock(&mm->page_table_lock);
-                return NULL;
+                return -ENOMEM;
         }
 
         spin_lock(&mm->page_table_lock);
-        if (pgd_present(*pgd)) {
+        if (pgd_present(*pgd))          /* Another has populated it */
                 pud_free(new);
-                goto out;
-        }
-        pgd_populate(mm, pgd, new);
- out:
+        else
+                pgd_populate(mm, pgd, new);
         if (mm == &init_mm)             /* Temporary bridging hack */
                 spin_unlock(&mm->page_table_lock);
-        return pud_offset(pgd, address);
+        return 0;
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
 
@@ -2124,7 +2111,7 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
  * Allocate page middle directory.
  * We've already handled the fast-path in-line.
  */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
         pmd_t *new;
 
@@ -2134,28 +2121,24 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
         if (!new) {
                 if (mm != &init_mm)     /* Temporary bridging hack */
                         spin_lock(&mm->page_table_lock);
-                return NULL;
+                return -ENOMEM;
         }
 
         spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
-        if (pud_present(*pud)) {
+        if (pud_present(*pud))          /* Another has populated it */
                 pmd_free(new);
-                goto out;
-        }
-        pud_populate(mm, pud, new);
+        else
+                pud_populate(mm, pud, new);
 #else
-        if (pgd_present(*pud)) {
+        if (pgd_present(*pud))          /* Another has populated it */
                 pmd_free(new);
-                goto out;
-        }
-        pgd_populate(mm, pud, new);
+        else
+                pgd_populate(mm, pud, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
-
- out:
         if (mm == &init_mm)             /* Temporary bridging hack */
                 spin_unlock(&mm->page_table_lock);
-        return pmd_offset(pud, address);
+        return 0;
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
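The matching pud/pmd fast paths live in headers not shown in this mm/-only diff; a minimal sketch of the presumed inline side, with pud_alloc shown and pmd_alloc analogous (names follow the pre-existing convention, not quoted from this commit):

static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd,
                                unsigned long address)
{
        /* Common case: already populated, just compute the offset. */
        if (pgd_none(*pgd) && __pud_alloc(mm, pgd, address))
                return NULL;            /* __pud_alloc returned -ENOMEM */
        return pud_offset(pgd, address);
}

Short-circuit evaluation keeps the fast path to one test plus pud_offset(), the "offset calculation left to the inline" that the commit message argues for.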
diff --git a/mm/mremap.c b/mm/mremap.c
index ccf456477020..616facc3d28a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -51,7 +51,6 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd = NULL;
-        pte_t *pte;
 
         /*
          * We do need page_table_lock: because allocators expect that.
@@ -66,12 +65,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
         if (!pmd)
                 goto out;
 
-        pte = pte_alloc_map(mm, pmd, addr);
-        if (!pte) {
+        if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
                 pmd = NULL;
-                goto out;
-        }
-        pte_unmap(pte);
 out:
         spin_unlock(&mm->page_table_lock);
         return pmd;
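alloc_new_pmd() above needs only a populated page table, never a mapped pte, so it can call __pte_alloc directly and skip the map/unmap pair (and, under CONFIG_HIGHPTE, the kmap behind it). A caller that does want the pte keeps the old interface through the macro; a hedged usage sketch in fault-handler shape (identifiers illustrative):

        pte_t *pte = pte_alloc_map(mm, pmd, address);
        if (!pte)
                return VM_FAULT_OOM;    /* out-of-line allocation failed */
        /* ... examine or install the entry under page_table_lock ... */
        pte_unmap(pte);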